/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &chan->state))
			tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	if (test_bit(IOAT_RUN, &chan->state))
		tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &chan->common;
	unsigned long data = (unsigned long) c;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
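	/* cleanup_lock serializes the cleanup tasklet and the channel timer */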
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	dma_cookie_init(&chan->common);
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = device->timer_fn;
	chan->timer.data = data;
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
}

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
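	 * (issue_pending and tx_submit only append while pending is positive,
	 * so INT_MIN keeps the hardware untouched until the reset timer runs.)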
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	set_bit(IOAT_RUN, &chan->state);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}

void ioat_stop(struct ioat_chan_common *chan)
{
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	int chan_id = chan_num(chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &chan->state);

	/* flush inflight interrupts */
	switch (device->irq_mode) {
	case IOAT_MSIX:
		msix = &device->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	device->cleanup_fn((unsigned long) &chan->common);
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	ioat_stop(chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}

static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}

static void ioat1_cleanup_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	ioat1_cleanup(ioat);
	if (!test_bit(IOAT_RUN, &chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
	dma_addr_t phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   dma_addr_t *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
		__func__, (unsigned long long) phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - clean up finished descriptors
 * @ioat: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		dma_addr_t phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	struct ioatdma_device *device = chan->device;
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	device->cleanup_fn((unsigned long) c);

	return dma_cookie_status(c, cookie, txstate);
}

static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

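	/* queue a NULL (no-data) descriptor to seed the chain and give the
	 * hardware a valid chain address to append real work to
	 */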
	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	device->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

	device->irq_mode = IOAT_INTX;
done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	device->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}
EXPORT_SYMBOL(ioat_dma_setup_interrupts);

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show = ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat1_attrs,
};

void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}

int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	device->self_test = ioat_dma_self_test;
	device->timer_fn = ioat1_timer_event;
	device->cleanup_fn = ioat1_cleanup_event;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	err = ioat_register(device);
	if (err)
		return err;
	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}

void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}