dma.c: 599d49de7f69cb5a23e913db24e168ba2f09bd05 -> 3372de5813e4da8305002ff6ffbfc0c7012cb319 (listing shows the newer revision)
1/*
2 * Intel I/OAT DMA Linux driver
3 * Copyright(c) 2004 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *

--- 23 unchanged lines hidden ---

32#include <linux/workqueue.h>
33#include <linux/prefetch.h>
34#include "dma.h"
35#include "registers.h"
36#include "hw.h"
37
38#include "../dmaengine.h"
39
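/* forward declaration: ioat_cleanup() below calls ioat_eh(), which is defined later in this file */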
40static void ioat_eh(struct ioatdma_chan *ioat_chan);
41
42/**
43 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
44 * @irq: interrupt id
45 * @data: interrupt data
46 */
47irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
48{
49 struct ioatdma_device *instance = data;

--- 69 unchanged lines hidden ---

119
120 /* flush inflight tasklet runs */
121 tasklet_kill(&ioat_chan->cleanup_task);
122
123 /* final cleanup now that everything is quiesced and can't re-arm */
124 ioat_dma->cleanup_fn((unsigned long)&ioat_chan->dma_chan);
125}
126
127static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
128{
129 ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
130 ioat_chan->issued = ioat_chan->head;
131 writew(ioat_chan->dmacount,
132 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
133 dev_dbg(to_dev(ioat_chan),
134 "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
135 __func__, ioat_chan->head, ioat_chan->tail,
136 ioat_chan->issued, ioat_chan->dmacount);

--- 59 unchanged lines hidden ---

196
197void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
198{
199 spin_lock_bh(&ioat_chan->prep_lock);
200 __ioat_start_null_desc(ioat_chan);
201 spin_unlock_bh(&ioat_chan->prep_lock);
202}
203
204static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
205{
206 /* set the tail to be re-issued */
207 ioat_chan->issued = ioat_chan->tail;
208 ioat_chan->dmacount = 0;
209 set_bit(IOAT_COMPLETION_PENDING, &ioat_chan->state);
210 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
211
212 dev_dbg(to_dev(ioat_chan),

--- 6 unchanged lines hidden ---

219
220 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
221 ioat_set_chainaddr(ioat_chan, desc->txd.phys);
222 __ioat_issue_pending(ioat_chan);
223 } else
224 __ioat_start_null_desc(ioat_chan);
225}
226
227static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
228{
229 unsigned long end = jiffies + tmo;
230 int err = 0;
231 u32 status;
232
233 status = ioat_chansts(ioat_chan);
234 if (is_ioat_active(status) || is_ioat_idle(status))
235 ioat_suspend(ioat_chan);

--- 4 unchanged lines hidden ---

240 }
241 status = ioat_chansts(ioat_chan);
242 cpu_relax();
243 }
244
245 return err;
246}
247
248static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
249{
250 unsigned long end = jiffies + tmo;
251 int err = 0;
252
253 ioat_reset(ioat_chan);
254 while (ioat_reset_pending(ioat_chan)) {
255 if (end && time_after(jiffies, end)) {
256 err = -ETIMEDOUT;

--- 99 unchanged lines hidden ---

356
357 hw->next = next->txd.phys;
358 }
359 ring[i]->hw->next = ring[0]->txd.phys;
360
361 return ring;
362}
363
364static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
365{
366 /* reshape differs from normal ring allocation in that we want
367 * to allocate a new software ring while only
368 * extending/truncating the hardware ring
369 */
370 struct dma_chan *c = &ioat_chan->dma_chan;
371 const u32 curr_size = ioat_ring_size(ioat_chan);
372 const u16 active = ioat_ring_active(ioat_chan);

--- 150 unchanged lines hidden ---

523 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
524
525 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
526 ioat_dma->timer_fn((unsigned long)ioat_chan);
527 }
528
529 return -ENOMEM;
530}
531
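/*
 * An XOR/XOR_VAL descriptor with more than 5 sources, or a PQ/PQ_VAL
 * descriptor with more than 3, spills into a second (extended) hardware
 * descriptor, so it occupies two ring slots and cleanup must skip the
 * extra one.
 */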
532static bool desc_has_ext(struct ioat_ring_ent *desc)
533{
534 struct ioat_dma_descriptor *hw = desc->hw;
535
536 if (hw->ctl_f.op == IOAT_OP_XOR ||
537 hw->ctl_f.op == IOAT_OP_XOR_VAL) {
538 struct ioat_xor_descriptor *xor = desc->xor;
539
540 if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
541 return true;
542 } else if (hw->ctl_f.op == IOAT_OP_PQ ||
543 hw->ctl_f.op == IOAT_OP_PQ_VAL) {
544 struct ioat_pq_descriptor *pq = desc->pq;
545
546 if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
547 return true;
548 }
549
550 return false;
551}
552
553static void
554ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
555{
556 if (!sed)
557 return;
558
559 dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
560 kmem_cache_free(ioat_sed_cache, sed);
561}
562
563static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
564{
565 u64 phys_complete;
566 u64 completion;
567
568 completion = *ioat_chan->completion;
569 phys_complete = ioat_chansts_to_addr(completion);
570
571 dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
572 (unsigned long long) phys_complete);
573
574 return phys_complete;
575}
576
577static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
578 u64 *phys_complete)
579{
580 *phys_complete = ioat_get_current_completion(ioat_chan);
581 if (*phys_complete == ioat_chan->last_completion)
582 return false;
583
584 clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
585 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
586
587 return true;
588}
589
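/*
 * For PQ validate operations, fold the descriptor write back error status
 * (dwbes) P/Q mismatch bits into the result word supplied by the submitter.
 * Only reached when the device advertises IOAT_CAP_DWBES.
 */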
590static void
591desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
592{
593 struct ioat_dma_descriptor *hw = desc->hw;
594
595 switch (hw->ctl_f.op) {
596 case IOAT_OP_PQ_VAL:
597 case IOAT_OP_PQ_VAL_16S:
598 {
599 struct ioat_pq_descriptor *pq = desc->pq;
600
601		/* check whether the hardware wrote back an error status */
602 if (!pq->dwbes_f.wbes)
603 return;
604
605 /* need to set a chanerr var for checking to clear later */
606
607 if (pq->dwbes_f.p_val_err)
608 *desc->result |= SUM_CHECK_P_RESULT;
609
610 if (pq->dwbes_f.q_val_err)
611 *desc->result |= SUM_CHECK_Q_RESULT;
612
613 return;
614 }
615 default:
616 return;
617 }
618}
619
620/**
621 * __cleanup - reclaim used descriptors
622 * @ioat_chan: channel (ring) to clean
623 */
624static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
625{
626 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
627 struct ioat_ring_ent *desc;
628 bool seen_current = false;
629 int idx = ioat_chan->tail, i;
630 u16 active;
631
632 dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
633 __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
634
635 /*
636 * At restart of the channel, the completion address and the
637 * channel status will be 0 due to starting a new chain. Since
638	 * it's a new chain and the first descriptor "fails", there is
639 * nothing to clean up. We do not want to reap the entire submitted
640 * chain due to this 0 address value and then BUG.
641 */
642 if (!phys_complete)
643 return;
644
645 active = ioat_ring_active(ioat_chan);
646 for (i = 0; i < active && !seen_current; i++) {
647 struct dma_async_tx_descriptor *tx;
648
649 smp_read_barrier_depends();
650 prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
651 desc = ioat_get_ring_ent(ioat_chan, idx + i);
652 dump_desc_dbg(ioat_chan, desc);
653
654 /* set err stat if we are using dwbes */
655 if (ioat_dma->cap & IOAT_CAP_DWBES)
656 desc_get_errstat(ioat_chan, desc);
657
658 tx = &desc->txd;
659 if (tx->cookie) {
660 dma_cookie_complete(tx);
661 dma_descriptor_unmap(tx);
662 if (tx->callback) {
663 tx->callback(tx->callback_param);
664 tx->callback = NULL;
665 }
666 }
667
668 if (tx->phys == phys_complete)
669 seen_current = true;
670
671 /* skip extended descriptors */
672 if (desc_has_ext(desc)) {
673 BUG_ON(i + 1 >= active);
674 i++;
675 }
676
677 /* cleanup super extended descriptors */
678 if (desc->sed) {
679 ioat_free_sed(ioat_dma, desc->sed);
680 desc->sed = NULL;
681 }
682 }
683
684 /* finish all descriptor reads before incrementing tail */
685 smp_mb();
686 ioat_chan->tail = idx + i;
687 /* no active descs have written a completion? */
688 BUG_ON(active && !seen_current);
689 ioat_chan->last_completion = phys_complete;
690
691 if (active - i == 0) {
692 dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
693 __func__);
694 clear_bit(IOAT_COMPLETION_PENDING, &ioat_chan->state);
695 mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
696 }
697
698	/* 5 microsecond delay per pending descriptor, capped at IOAT_INTRDELAY_MASK */
699 writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
700 ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
701}
702
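/*
 * Reap completed descriptors under cleanup_lock; if the channel has halted,
 * run ioat_eh() for errors covered by IOAT_CHANERR_HANDLE_MASK.
 */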
703static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
704{
705 u64 phys_complete;
706
707 spin_lock_bh(&ioat_chan->cleanup_lock);
708
709 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
710 __cleanup(ioat_chan, phys_complete);
711
712 if (is_ioat_halted(*ioat_chan->completion)) {
713 u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
714
715 if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
716 mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
717 ioat_eh(ioat_chan);
718 }
719 }
720
721 spin_unlock_bh(&ioat_chan->cleanup_lock);
722}
723
724void ioat_cleanup_event(unsigned long data)
725{
726 struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
727
728 ioat_cleanup(ioat_chan);
729 if (!test_bit(IOAT_RUN, &ioat_chan->state))
730 return;
731 writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
732}
733
734static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
735{
736 u64 phys_complete;
737
738 ioat_quiesce(ioat_chan, 0);
739 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
740 __cleanup(ioat_chan, phys_complete);
741
742 __ioat_restart_chan(ioat_chan);
743}
744
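/*
 * Error recovery for a halted channel: advance the tail to the faulting
 * descriptor, absorb expected P/Q validation failures into the result,
 * BUG() on anything unhandled, then acknowledge the error registers, mark
 * the faulting descriptor complete, and restart the channel.
 */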
745static void ioat_eh(struct ioatdma_chan *ioat_chan)
746{
747 struct pci_dev *pdev = to_pdev(ioat_chan);
748 struct ioat_dma_descriptor *hw;
749 struct dma_async_tx_descriptor *tx;
750 u64 phys_complete;
751 struct ioat_ring_ent *desc;
752 u32 err_handled = 0;
753 u32 chanerr_int;
754 u32 chanerr;
755
756 /* cleanup so tail points to descriptor that caused the error */
757 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
758 __cleanup(ioat_chan, phys_complete);
759
760 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
761 pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
762
763 dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
764 __func__, chanerr, chanerr_int);
765
766 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
767 hw = desc->hw;
768 dump_desc_dbg(ioat_chan, desc);
769
770 switch (hw->ctl_f.op) {
771 case IOAT_OP_XOR_VAL:
772 if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
773 *desc->result |= SUM_CHECK_P_RESULT;
774 err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
775 }
776 break;
777 case IOAT_OP_PQ_VAL:
778 case IOAT_OP_PQ_VAL_16S:
779 if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
780 *desc->result |= SUM_CHECK_P_RESULT;
781 err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
782 }
783 if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
784 *desc->result |= SUM_CHECK_Q_RESULT;
785 err_handled |= IOAT_CHANERR_XOR_Q_ERR;
786 }
787 break;
788 }
789
790 /* fault on unhandled error or spurious halt */
791 if (chanerr ^ err_handled || chanerr == 0) {
792 dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
793 __func__, chanerr, err_handled);
794 BUG();
795 } else { /* cleanup the faulty descriptor */
796 tx = &desc->txd;
797 if (tx->cookie) {
798 dma_cookie_complete(tx);
799 dma_descriptor_unmap(tx);
800 if (tx->callback) {
801 tx->callback(tx->callback_param);
802 tx->callback = NULL;
803 }
804 }
805 }
806
807 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
808 pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
809
810 /* mark faulting descriptor as complete */
811 *ioat_chan->completion = desc->txd.phys;
812
813 spin_lock_bh(&ioat_chan->prep_lock);
814 ioat_restart_channel(ioat_chan);
815 spin_unlock_bh(&ioat_chan->prep_lock);
816}
817
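/*
 * Called from the timer path under prep_lock: re-arm the completion timer if
 * descriptors are still outstanding, otherwise fall back to the idle timer
 * and step an oversized ring back down toward the default allocation order.
 */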
818static void check_active(struct ioatdma_chan *ioat_chan)
819{
820 if (ioat_ring_active(ioat_chan)) {
821 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
822 return;
823 }
824
825 if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
826 mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
827 else if (ioat_chan->alloc_order > ioat_get_alloc_order()) {
828 /* if the ring is idle, empty, and oversized try to step
829 * down the size
830 */
831 reshape_ring(ioat_chan, ioat_chan->alloc_order - 1);
832
833 /* keep shrinking until we get back to our minimum
834 * default size
835 */
836 if (ioat_chan->alloc_order > ioat_get_alloc_order())
837 mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
838 }
839
840}
841
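/*
 * Channel timer callback: reap any newly completed descriptors; if no
 * progress has been made since a previously acknowledged timeout, force a
 * channel restart, otherwise simply re-arm the timer.
 */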
842void ioat_timer_event(unsigned long data)
843{
844 struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
845 dma_addr_t phys_complete;
846 u64 status;
847
848 status = ioat_chansts(ioat_chan);
849
850 /* when halted due to errors check for channel
851 * programming errors before advancing the completion state
852 */
853 if (is_ioat_halted(status)) {
854 u32 chanerr;
855
856 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
857 dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
858 __func__, chanerr);
859 if (test_bit(IOAT_RUN, &ioat_chan->state))
860 BUG_ON(is_ioat_bug(chanerr));
861 else /* we never got off the ground */
862 return;
863 }
864
865 /* if we haven't made progress and we have already
866 * acknowledged a pending completion once, then be more
867 * forceful with a restart
868 */
869 spin_lock_bh(&ioat_chan->cleanup_lock);
870 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
871 __cleanup(ioat_chan, phys_complete);
872 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
873 spin_lock_bh(&ioat_chan->prep_lock);
874 ioat_restart_channel(ioat_chan);
875 spin_unlock_bh(&ioat_chan->prep_lock);
876 spin_unlock_bh(&ioat_chan->cleanup_lock);
877 return;
878 } else {
879 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
880 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
881 }
882
883
884 if (ioat_ring_active(ioat_chan))
885 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
886 else {
887 spin_lock_bh(&ioat_chan->prep_lock);
888 check_active(ioat_chan);
889 spin_unlock_bh(&ioat_chan->prep_lock);
890 }
891 spin_unlock_bh(&ioat_chan->cleanup_lock);
892}
893
894enum dma_status
895ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
896 struct dma_tx_state *txstate)
897{
898 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
899 enum dma_status ret;
900
901 ret = dma_cookie_status(c, cookie, txstate);
902 if (ret == DMA_COMPLETE)
903 return ret;
904
905 ioat_cleanup(ioat_chan);
906
907 return dma_cookie_status(c, cookie, txstate);
908}
909
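/*
 * On devices matched by is_bwd_ioat(), a reset requires freeing the current
 * MSI-X/MSI/INTx vectors and running interrupt setup again; all other
 * devices return 0 without touching the IRQ configuration.
 */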
910static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
911{
912 struct pci_dev *pdev = ioat_dma->pdev;
913 int irq = pdev->irq, i;
914
915 if (!is_bwd_ioat(pdev))
916 return 0;
917
918 switch (ioat_dma->irq_mode) {
919 case IOAT_MSIX:
920 for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
921 struct msix_entry *msix = &ioat_dma->msix_entries[i];
922 struct ioatdma_chan *ioat_chan;
923
924 ioat_chan = ioat_chan_by_index(ioat_dma, i);
925 devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
926 }
927
928 pci_disable_msix(pdev);
929 break;
930 case IOAT_MSI:
931 pci_disable_msi(pdev);
932 /* fall through */
933 case IOAT_INTX:
934 devm_free_irq(&pdev->dev, irq, ioat_dma);
935 break;
936 default:
937 return 0;
938 }
939 ioat_dma->irq_mode = IOAT_NOIRQ;
940
941 return ioat_dma_setup_interrupts(ioat_dma);
942}
943
944int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
945{
946 /* throw away whatever the channel was doing and get it
947 * initialized, with ioat3 specific workarounds
948 */
949 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
950 struct pci_dev *pdev = ioat_dma->pdev;
951 u32 chanerr;
952 u16 dev_id;
953 int err;
954
955 ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
956
957 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
958 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
959
960 if (ioat_dma->version < IOAT_VER_3_3) {
961 /* clear any pending errors */
962 err = pci_read_config_dword(pdev,
963 IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
964 if (err) {
965 dev_err(&pdev->dev,
966 "channel error register unreachable\n");
967 return err;
968 }
969 pci_write_config_dword(pdev,
970 IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
971
972 /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
973 * (workaround for spurious config parity error after restart)
974 */
975 pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
976 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
977 pci_write_config_dword(pdev,
978 IOAT_PCI_DMAUNCERRSTS_OFFSET,
979 0x10);
980 }
981 }
982
983 err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
984 if (!err)
985 err = ioat_irq_reinit(ioat_dma);
986
987 if (err)
988 dev_err(&pdev->dev, "Failed to reset: %d\n", err);
989
990 return err;
991}
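/*
 * Illustrative sketch, not part of dma.c: ioat_tx_status() above is wired up
 * elsewhere in the driver as the channel's device_tx_status callback, so
 * clients normally reach it through the generic dmaengine API rather than
 * calling it directly. A minimal, hypothetical client path might look like
 * the following; the function name example_ioat_memcpy() and its parameters
 * are illustrative only, and dst/src are assumed to be already DMA-mapped.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_ioat_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;

	/* grab any channel advertising memcpy capability (an ioatdma channel qualifies) */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* invokes the driver's device_issue_pending callback */

	/*
	 * dma_sync_wait() polls the driver's device_tx_status callback,
	 * i.e. ioat_tx_status() above, until the cookie completes.
	 */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
		dma_release_channel(chan);
		return -EIO;
	}

	dma_release_channel(chan);
	return 0;
}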