/*
 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "qib.h"

static unsigned qib_hol_timeout_ms = 3000;
module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
		 "duration of user app suspension after link failure");

unsigned qib_sdma_fetch_arb = 1;
module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");

/**
 * qib_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the qlogic_ib device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Cancel a range of PIO buffers.  Used at user process close,
 * in case it died while writing to a PIO buffer.
 */
void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
{
	unsigned long flags;
	unsigned i;
	unsigned last;

	last = first + cnt;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = first; i < last; i++) {
		__clear_bit(i, dd->pio_need_disarm);
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}
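/*
 * Overview of the deferred-disarm protocol used in the rest of this
 * file: a buffer that may still be in use is flagged in
 * dd->pio_need_disarm instead of being disarmed immediately.  The
 * actual disarm then happens in qib_sendbuf_done(), once the kernel
 * finishes writing the buffer, or in qib_disarm_piobufs_ifneeded(),
 * when a user process responds to the DISARM_BUFS event.
 */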
/*
 * This is called by a user process when it sees the DISARM_BUFS event
 * bit is set.
 */
int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned i;
	unsigned last;
	unsigned n = 0;

	last = rcd->pio_base + rcd->piocnt;
	/*
	 * Don't need uctxt_lock here, since user has called in to us.
	 * Clear the event bits at the start, in case more interrupts
	 * set bits while we are disarming.
	 */
	if (rcd->user_event_mask) {
		/*
		 * subctxt_cnt is 0 if not shared, so do base
		 * separately, first, then remaining subctxt, if any
		 */
		clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
		for (i = 1; i < rcd->subctxt_cnt; i++)
			clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				  &rcd->user_event_mask[i]);
	}
	spin_lock_irq(&dd->pioavail_lock);
	for (i = rcd->pio_base; i < last; i++) {
		if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
			n++;
			dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
		}
	}
	spin_unlock_irq(&dd->pioavail_lock);
	return 0;
}

static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; pidx++) {
		ppd = dd->pport + pidx;
		if (i >= ppd->sdma_state.first_sendbuf &&
		    i < ppd->sdma_state.last_sendbuf)
			return ppd;
	}
	return NULL;
}
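/*
 * Note on buffer ownership: buffers in a port's
 * [sdma_state.first_sendbuf, sdma_state.last_sendbuf) range belong to
 * the SDMA engine, as tested by is_sdma_buf() above.  Errors on those
 * buffers are handled by canceling sends for the whole port (see
 * qib_disarm_piobufs_set() below) rather than by a per-buffer disarm.
 */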
/*
 * Return true if the send buffer is being used by a user context.
 * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect.
 */
static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;

	spin_lock(&dd->uctxt_lock);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		rcd = dd->rcd[ctxt];
		if (!rcd || bufn < rcd->pio_base ||
		    bufn >= rcd->pio_base + rcd->piocnt)
			continue;
		if (rcd->user_event_mask) {
			int i;

			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock(&dd->uctxt_lock);

	return ret;
}
/*
 * Disarm a set of send buffers.  If a buffer might be actively being
 * written to, mark it to be disarmed later, once it is no longer being
 * written to.
 *
 * This should only be called from the IRQ error handler.
 */
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
			    unsigned cnt)
{
	struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
	unsigned i;
	unsigned long flags;

	for (i = 0; i < dd->num_pports; i++)
		pppd[i] = NULL;

	for (i = 0; i < cnt; i++) {
		int which;

		if (!test_bit(i, mask))
			continue;
		/*
		 * If the buffer is owned by the DMA hardware,
		 * reset the DMA engine.
		 */
		ppd = is_sdma_buf(dd, i);
		if (ppd) {
			pppd[ppd->port] = ppd;
			continue;
		}
		/*
		 * If the kernel is writing the buffer or the buffer is
		 * owned by a user process, we can't clear it yet.
		 */
		spin_lock_irqsave(&dd->pioavail_lock, flags);
		if (test_bit(i, dd->pio_writing) ||
		    (!test_bit(i << 1, dd->pioavailkernel) &&
		     find_ctxt(dd, i))) {
			__set_bit(i, dd->pio_need_disarm);
			which = 0;
		} else {
			which = 1;
			dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
		}
		spin_unlock_irqrestore(&dd->pioavail_lock, flags);
	}

	/* do cancel_sends once per port that had sdma piobufs in error */
	for (i = 0; i < dd->num_pports; i++)
		if (pppd[i])
			qib_cancel_sends(pppd[i]);
}
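/*
 * Note on the bookkeeping used below: the chip DMAs its PIOAvail
 * registers into dd->pioavailregs_dma (little-endian, 64 bits per
 * register), while dd->pioavailshadow is the host-order working copy
 * that the buffer allocator consults.  update_send_bufs() reconciles
 * the two for buffers marked kernel-owned in dd->pioavailkernel.
 */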
/**
 * update_send_bufs - update shadow copy of the PIO availability map
 * @dd: the qlogic_ib device
 *
 * Called whenever our local copy indicates we have run out of send buffers.
 */
static void update_send_bufs(struct qib_devdata *dd)
{
	unsigned long flags;
	unsigned i;
	const unsigned piobregs = dd->pioavregs;

	/*
	 * If the generation (check) bits have changed, then we update the
	 * busy bit for the corresponding PIO buffer.  This algorithm will
	 * modify positions to the value they already have in some cases
	 * (i.e., no change), but it's faster than changing only the bits
	 * that have changed.
	 *
	 * We would like to do this atomically, to avoid spinlocks in the
	 * critical send path, but that's not really possible, given the
	 * type of changes, and that this routine could be called on
	 * multiple CPUs simultaneously, so we lock in this routine only,
	 * to avoid conflicting updates; all we change is the shadow, and
	 * it's a single 64 bit memory location, so by definition the update
	 * is atomic in terms of what other CPUs can see in testing the
	 * bits.  The spin_lock overhead isn't too bad, since it only
	 * happens when all buffers are in use, so only CPU overhead, not
	 * latency or bandwidth, is affected.
	 */
	if (!dd->pioavailregs_dma)
		return;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;

		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
		pchg = dd->pioavailkernel[i] &
			~(dd->pioavailshadow[i] ^ piov);
		pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
			pnew = dd->pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/*
 * Debugging code and stats updates if no pio buffers available.
 */
static noinline void no_send_bufs(struct qib_devdata *dd)
{
	dd->upd_pio_shadow = 1;

	/* not atomic, but if we lose a stat count in a while, that's OK */
	qib_stats.sps_nopiobufs++;
}
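/*
 * The allocator below tracks each send buffer with two bits in
 * dd->pioavailshadow, mirroring the chip's PIOAvail layout: bit 2*i is
 * the generation ("check") bit and bit 2*i+1 is the busy bit for
 * buffer i.  For example, buffer 3 is described by shadow bits 6
 * (generation) and 7 (busy).
 */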
/*
 * Common code for normal driver send buffer allocation, and reserved
 * allocation.
 *
 * Do appropriate marking as busy, etc.
 * Returns buffer pointer if one is found, otherwise NULL.
 */
u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
				  u32 first, u32 last)
{
	unsigned i, j, updated = 0;
	unsigned nbufs;
	unsigned long flags;
	unsigned long *shadow = dd->pioavailshadow;
	u32 __iomem *buf;

	if (!(dd->flags & QIB_PRESENT))
		return NULL;

	nbufs = last - first + 1; /* number in range to check */
	if (dd->upd_pio_shadow) {
update_shadow:
		/*
		 * Minor optimization.  If we had no buffers on last call,
		 * start out by doing the update; continue and do the scan
		 * even if no buffers were updated, to be paranoid.
		 */
		update_send_bufs(dd);
		updated++;
	}
	i = first;
	/*
	 * While test_and_set_bit() is atomic, we do that and then the
	 * change_bit(), and the pair is not.  See if this is the cause
	 * of the remaining armlaunch errors.
	 */
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	if (dd->last_pio >= first && dd->last_pio <= last)
		i = dd->last_pio + 1;
	if (!first)
		/* adjust to min possible */
		nbufs = last - dd->min_kernel_pio + 1;
	for (j = 0; j < nbufs; j++, i++) {
		if (i > last)
			i = !first ? dd->min_kernel_pio : first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		__change_bit(2 * i, shadow);
		/* remember that the buffer can be written to now */
		__set_bit(i, dd->pio_writing);
		if (!first && first != last) /* first == last on VL15, avoid */
			dd->last_pio = i;
		break;
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	if (j == nbufs) {
		if (!updated)
			/*
			 * First time through; shadow exhausted, but there
			 * may be buffers available, so try an update and
			 * then rescan.
			 */
			goto update_shadow;
		no_send_bufs(dd);
		buf = NULL;
	} else {
		if (i < dd->piobcnt2k)
			buf = (u32 __iomem *)(dd->pio2kbase +
				i * dd->palign);
		else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
			buf = (u32 __iomem *)(dd->pio4kbase +
				(i - dd->piobcnt2k) * dd->align4k);
		else
			buf = (u32 __iomem *)(dd->piovl15base +
				(i - (dd->piobcnt2k + dd->piobcnt4k)) *
				dd->align4k);
		if (pbufnum)
			*pbufnum = i;
		dd->upd_pio_shadow = 0;
	}

	return buf;
}
/*
 * Record that the caller is finished writing to the buffer, so we
 * don't disarm it while it is being written, and disarm it now if a
 * disarm was deferred while the write was in progress.
 */
void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	__clear_bit(n, dd->pio_writing);
	if (__test_and_clear_bit(n, dd->pio_need_disarm))
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}
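/*
 * Illustrative allocate/write/release pattern (a sketch only; the real
 * callers live elsewhere in the driver):
 *
 *	u32 bufnum;
 *	u32 __iomem *piobuf;
 *
 *	piobuf = qib_getsendbuf_range(dd, &bufnum, first, last);
 *	if (piobuf) {
 *		... copy the packet into piobuf ...
 *		qib_sendbuf_done(dd, bufnum);
 *	}
 */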
/**
 * qib_chg_pioavailkernel - change which send buffers are available for kernel
 * @dd: the qlogic_ib device
 * @start: the starting send buffer number
 * @len: the number of send buffers
 * @avail: true if the buffers are available for kernel use, false otherwise
 * @rcd: the context, if any; passed through to the f_txchk_change hook
 */
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
			    unsigned len, u32 avail, struct qib_ctxtdata *rcd)
{
	unsigned long flags;
	unsigned end;
	unsigned ostart = start;

	/* There are two bits per send buffer (busy and generation) */
	start *= 2;
	end = start + len * 2;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	/* Set or clear the busy bit in the shadow. */
	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i;

			/*
			 * The BUSY bit will never be set, because we disarm
			 * the user buffers before we hand them back to the
			 * kernel.  We do have to make sure the generation
			 * bit is set correctly in shadow, since it could
			 * have changed many times while allocated to user.
			 * We can't use the bitmap functions on the full
			 * dma array because it is always little-endian, so
			 * we have to flip to host-order first.
			 * BITS_PER_LONG is slightly wrong, since it's
			 * always 64 bits per register in the chip...
			 * We only work on 64 bit kernels, so that's OK.
			 */
			i = start / BITS_PER_LONG;
			__clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
				    dd->pioavailshadow);
			dma = (unsigned long)
				le64_to_cpu(dd->pioavailregs_dma[i]);
			if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
				      start) % BITS_PER_LONG, &dma))
				__set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
					  start, dd->pioavailshadow);
			else
				__clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
					    + start, dd->pioavailshadow);
			__set_bit(start, dd->pioavailkernel);
			if ((start >> 1) < dd->min_kernel_pio)
				dd->min_kernel_pio = start >> 1;
		} else {
			__set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
				  dd->pioavailshadow);
			__clear_bit(start, dd->pioavailkernel);
			if ((start >> 1) > dd->min_kernel_pio)
				dd->min_kernel_pio = start >> 1;
		}
		start += 2;
	}

	if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
		dd->last_pio = dd->min_kernel_pio - 1;
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);

	dd->f_txchk_change(dd, ostart, len, avail, rcd);
}
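/*
 * Scan-position bookkeeping, as consumed by qib_getsendbuf_range()
 * above: dd->min_kernel_pio bounds the low end of the kernel's scan
 * when the requested range starts at buffer 0, and dd->last_pio
 * remembers where the previous allocation succeeded, so the next scan
 * can resume just past it.
 */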
/*
 * Flush all sends that might be in the ready-to-send state, as well as
 * any that are in the process of being sent.  Used whenever we need to
 * be sure the send side is idle.  Cleans up all buffer state by
 * canceling all pio buffers, and issuing an abort, which cleans up
 * anything in the launch fifo.  The cancel is superfluous on some chip
 * versions, but it's safer to always do it.
 * PIOAvail bits are updated by the chip as if a normal send had happened.
 */
void qib_cancel_sends(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	unsigned long flags;
	unsigned ctxt;
	unsigned i;
	unsigned last;

	/*
	 * Tell PSM to disarm buffers again before trying to reuse them.
	 * We need to be sure the rcd doesn't change out from under us
	 * while we do so.  We hold the two locks sequentially.  We might
	 * needlessly set some need_disarm bits as a result, if the
	 * context is closed after we release the uctxt_lock, but that's
	 * fairly benign, and safer than nesting the locks.
	 */
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		rcd = dd->rcd[ctxt];
		if (rcd && rcd->ppd == ppd) {
			last = rcd->pio_base + rcd->piocnt;
			if (rcd->user_event_mask) {
				/*
				 * subctxt_cnt is 0 if not shared, so do base
				 * separately, first, then remaining subctxt,
				 * if any
				 */
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[0]);
				for (i = 1; i < rcd->subctxt_cnt; i++)
					set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
						&rcd->user_event_mask[i]);
			}
			i = rcd->pio_base;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			spin_lock_irqsave(&dd->pioavail_lock, flags);
			for (; i < last; i++)
				__set_bit(i, dd->pio_need_disarm);
			spin_unlock_irqrestore(&dd->pioavail_lock, flags);
		} else
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	}

	if (!(dd->flags & QIB_HAS_SEND_DMA))
		dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
				    QIB_SENDCTRL_FLUSH);
}

/*
 * Force an update of the in-memory copy of the pioavail registers, when
 * needed for any of a variety of reasons.
 * If already off, this routine is a nop, on the assumption that the
 * caller (or set of callers) will "do the right thing".
 * This is a per-device operation, so just use the first port.
 */
void qib_force_pio_avail_update(struct qib_devdata *dd)
{
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
}

void qib_hol_down(struct qib_pportdata *ppd)
{
	/*
	 * Cancel sends when the link goes DOWN so that we aren't doing it
	 * at INIT when we might be trying to send SMI packets.
	 */
	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		qib_cancel_sends(ppd);
}
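/*
 * Head-of-line ("HoL") blocking handling: while the link is at INIT,
 * the timer below periodically flushes sends so that a stuck packet
 * cannot block SMP replies indefinitely; once the link is UP, the
 * timer becomes a nop (see qib_hol_up() and qib_hol_event()).
 */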
/*
 * Link is at INIT.
 * We start the HoL timer so we can detect stuck packets blocking SMP replies.
 * Timer may already be running, so use mod_timer, not add_timer.
 */
void qib_hol_init(struct qib_pportdata *ppd)
{
	if (ppd->hol_state != QIB_HOL_INIT) {
		ppd->hol_state = QIB_HOL_INIT;
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}

/*
 * Link is up, continue any user processes, and ensure timer
 * is a nop, if running.  Let timer keep running, if set; it
 * will nop when it sees the link is up.
 */
void qib_hol_up(struct qib_pportdata *ppd)
{
	ppd->hol_state = QIB_HOL_UP;
}

/*
 * This is only called via the timer.
 */
void qib_hol_event(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	/* If hardware error, etc, skip. */
	if (!(ppd->dd->flags & QIB_INITTED))
		return;

	if (ppd->hol_state != QIB_HOL_UP) {
		/*
		 * Try to flush sends in case a stuck packet is blocking
		 * SMP replies.
		 */
		qib_hol_down(ppd);
		mod_timer(&ppd->hol_timer,
			  jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
	}
}