xref: /linux/drivers/dma/ioat/dma.c (revision bcdc4bd356c76a5bab2f480a73f089dc8e0e4e89)
1 /*
2  * Intel I/OAT DMA Linux driver
3  * Copyright(c) 2004 - 2015 Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * The full GNU General Public License is included in this distribution in
15  * the file called "COPYING".
16  *
17  */
18 
19 /*
20  * This driver supports an Intel I/OAT DMA engine, which does asynchronous
21  * copy operations.
22  */
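
/*
 * Illustrative dmaengine-client sketch (not part of this driver): a consumer
 * of the engine would typically do something like
 *
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len, 0);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * where dst_dma, src_dma and len are caller-provided DMA addresses and length.
 */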
23 
24 #include <linux/init.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27 #include <linux/pci.h>
28 #include <linux/interrupt.h>
29 #include <linux/dmaengine.h>
30 #include <linux/delay.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/workqueue.h>
33 #include <linux/prefetch.h>
34 #include <linux/sizes.h>
35 #include "dma.h"
36 #include "registers.h"
37 #include "hw.h"
38 
39 #include "../dmaengine.h"
40 
41 static char *chanerr_str[] = {
42 	"DMA Transfer Source Address Error",
43 	"DMA Transfer Destination Address Error",
44 	"Next Descriptor Address Error",
45 	"Descriptor Error",
46 	"Chan Address Value Error",
47 	"CHANCMD Error",
48 	"Chipset Uncorrectable Data Integrity Error",
49 	"DMA Uncorrectable Data Integrity Error",
50 	"Read Data Error",
51 	"Write Data Error",
52 	"Descriptor Control Error",
53 	"Descriptor Transfer Size Error",
54 	"Completion Address Error",
55 	"Interrupt Configuration Error",
56 	"Super extended descriptor Address Error",
57 	"Unaffiliated Error",
58 	"CRC or XOR P Error",
59 	"XOR Q Error",
60 	"Descriptor Count Error",
61 	"DIF All F detect Error",
62 	"Guard Tag verification Error",
63 	"Application Tag verification Error",
64 	"Reference Tag verification Error",
65 	"Bundle Bit Error",
66 	"Result DIF All F detect Error",
67 	"Result Guard Tag verification Error",
68 	"Result Application Tag verification Error",
69 	"Result Reference Tag verification Error",
70 };
71 
72 static void ioat_eh(struct ioatdma_chan *ioat_chan);
73 
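/*
 * Decode a CHANERR bitmask into one log line per set bit; e.g. chanerr = 0x3
 * reports "Err(0): DMA Transfer Source Address Error" followed by
 * "Err(1): DMA Transfer Destination Address Error".
 */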
74 static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
75 {
76 	int i;
77 
78 	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
79 		if ((chanerr >> i) & 1) {
80 			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
81 				i, chanerr_str[i]);
82 		}
83 	}
84 }
85 
86 /**
87  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
88  * @irq: interrupt id
89  * @data: interrupt data
90  */
91 irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
92 {
93 	struct ioatdma_device *instance = data;
94 	struct ioatdma_chan *ioat_chan;
95 	unsigned long attnstatus;
96 	int bit;
97 	u8 intrctrl;
98 
99 	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
100 
101 	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
102 		return IRQ_NONE;
103 
104 	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
105 		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
106 		return IRQ_NONE;
107 	}
108 
109 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
110 	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
111 		ioat_chan = ioat_chan_by_index(instance, bit);
112 		if (test_bit(IOAT_RUN, &ioat_chan->state))
113 			tasklet_schedule(&ioat_chan->cleanup_task);
114 	}
115 
116 	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
117 	return IRQ_HANDLED;
118 }
119 
120 /**
121  * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
122  * @irq: interrupt id
123  * @data: interrupt data
124  */
125 irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
126 {
127 	struct ioatdma_chan *ioat_chan = data;
128 
129 	if (test_bit(IOAT_RUN, &ioat_chan->state))
130 		tasklet_schedule(&ioat_chan->cleanup_task);
131 
132 	return IRQ_HANDLED;
133 }
134 
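/**
 * ioat_stop - quiesce a channel before teardown
 * @ioat_chan: channel to stop
 *
 * Clears IOAT_RUN so the interrupt handlers and tasklet stop re-arming work,
 * flushes in-flight interrupts, timers and tasklet runs, then does a final
 * cleanup pass.
 */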
135 void ioat_stop(struct ioatdma_chan *ioat_chan)
136 {
137 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
138 	struct pci_dev *pdev = ioat_dma->pdev;
139 	int chan_id = chan_num(ioat_chan);
140 	struct msix_entry *msix;
141 
142 	/* 1/ stop irq from firing tasklets
143 	 * 2/ stop the tasklet from re-arming irqs
144 	 */
145 	clear_bit(IOAT_RUN, &ioat_chan->state);
146 
147 	/* flush inflight interrupts */
148 	switch (ioat_dma->irq_mode) {
149 	case IOAT_MSIX:
150 		msix = &ioat_dma->msix_entries[chan_id];
151 		synchronize_irq(msix->vector);
152 		break;
153 	case IOAT_MSI:
154 	case IOAT_INTX:
155 		synchronize_irq(pdev->irq);
156 		break;
157 	default:
158 		break;
159 	}
160 
161 	/* flush inflight timers */
162 	del_timer_sync(&ioat_chan->timer);
163 
164 	/* flush inflight tasklet runs */
165 	tasklet_kill(&ioat_chan->cleanup_task);
166 
167 	/* final cleanup now that everything is quiesced and can't re-arm */
168 	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
169 }
170 
171 static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
172 {
173 	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
174 	ioat_chan->issued = ioat_chan->head;
175 	writew(ioat_chan->dmacount,
176 	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
177 	dev_dbg(to_dev(ioat_chan),
178 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
179 		__func__, ioat_chan->head, ioat_chan->tail,
180 		ioat_chan->issued, ioat_chan->dmacount);
181 }
182 
183 void ioat_issue_pending(struct dma_chan *c)
184 {
185 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
186 
187 	if (ioat_ring_pending(ioat_chan)) {
188 		spin_lock_bh(&ioat_chan->prep_lock);
189 		__ioat_issue_pending(ioat_chan);
190 		spin_unlock_bh(&ioat_chan->prep_lock);
191 	}
192 }
193 
194 /**
195  * ioat_update_pending - issue pending descriptors once past the watermark
196  * @ioat_chan: ioat channel
197  *
198  * Check if the number of unsubmitted descriptors has exceeded the
199  * watermark and, if so, issue them.  Called with prep_lock held.
200  */
201 static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
202 {
203 	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
204 		__ioat_issue_pending(ioat_chan);
205 }
206 
207 static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
208 {
209 	struct ioat_ring_ent *desc;
210 	struct ioat_dma_descriptor *hw;
211 
212 	if (ioat_ring_space(ioat_chan) < 1) {
213 		dev_err(to_dev(ioat_chan),
214 			"Unable to start null desc - ring full\n");
215 		return;
216 	}
217 
218 	dev_dbg(to_dev(ioat_chan),
219 		"%s: head: %#x tail: %#x issued: %#x\n",
220 		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
221 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
222 
223 	hw = desc->hw;
224 	hw->ctl = 0;
225 	hw->ctl_f.null = 1;
226 	hw->ctl_f.int_en = 1;
227 	hw->ctl_f.compl_write = 1;
228 	/* set size to non-zero value (channel returns error when size is 0) */
229 	hw->size = NULL_DESC_BUFFER_SIZE;
230 	hw->src_addr = 0;
231 	hw->dst_addr = 0;
232 	async_tx_ack(&desc->txd);
233 	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
234 	dump_desc_dbg(ioat_chan, desc);
235 	/* make sure descriptors are written before we submit */
236 	wmb();
237 	ioat_chan->head += 1;
238 	__ioat_issue_pending(ioat_chan);
239 }
240 
241 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
242 {
243 	spin_lock_bh(&ioat_chan->prep_lock);
244 	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
245 		__ioat_start_null_desc(ioat_chan);
246 	spin_unlock_bh(&ioat_chan->prep_lock);
247 }
248 
249 static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
250 {
251 	/* set the tail to be re-issued */
252 	ioat_chan->issued = ioat_chan->tail;
253 	ioat_chan->dmacount = 0;
254 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
255 
256 	dev_dbg(to_dev(ioat_chan),
257 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
258 		__func__, ioat_chan->head, ioat_chan->tail,
259 		ioat_chan->issued, ioat_chan->dmacount);
260 
261 	if (ioat_ring_pending(ioat_chan)) {
262 		struct ioat_ring_ent *desc;
263 
264 		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
265 		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
266 		__ioat_issue_pending(ioat_chan);
267 	} else
268 		__ioat_start_null_desc(ioat_chan);
269 }
270 
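/*
 * Suspend the channel and poll CHANSTS until it leaves the active/idle
 * states; a @tmo of 0 means wait indefinitely.  Returns 0 or -ETIMEDOUT.
 */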
271 static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
272 {
273 	unsigned long end = jiffies + tmo;
274 	int err = 0;
275 	u32 status;
276 
277 	status = ioat_chansts(ioat_chan);
278 	if (is_ioat_active(status) || is_ioat_idle(status))
279 		ioat_suspend(ioat_chan);
280 	while (is_ioat_active(status) || is_ioat_idle(status)) {
281 		if (tmo && time_after(jiffies, end)) {
282 			err = -ETIMEDOUT;
283 			break;
284 		}
285 		status = ioat_chansts(ioat_chan);
286 		cpu_relax();
287 	}
288 
289 	return err;
290 }
291 
292 static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
293 {
294 	unsigned long end = jiffies + tmo;
295 	int err = 0;
296 
297 	ioat_reset(ioat_chan);
298 	while (ioat_reset_pending(ioat_chan)) {
299 		if (end && time_after(jiffies, end)) {
300 			err = -ETIMEDOUT;
301 			break;
302 		}
303 		cpu_relax();
304 	}
305 
306 	return err;
307 }
308 
309 static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
310 	__releases(&ioat_chan->prep_lock)
311 {
312 	struct dma_chan *c = tx->chan;
313 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
314 	dma_cookie_t cookie;
315 
316 	cookie = dma_cookie_assign(tx);
317 	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
318 
319 	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
320 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
321 
322 	/* Make descriptor updates visible before advancing ioat_chan->head.
323 	 * This is purposefully not smp_wmb() since we are also publishing
324 	 * the descriptor updates to a dma device.
325 	 */
326 	wmb();
327 
328 	ioat_chan->head += ioat_chan->produce;
329 
330 	ioat_update_pending(ioat_chan);
331 	spin_unlock_bh(&ioat_chan->prep_lock);
332 
333 	return cookie;
334 }
335 
336 static struct ioat_ring_ent *
337 ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
338 {
339 	struct ioat_dma_descriptor *hw;
340 	struct ioat_ring_ent *desc;
341 	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
342 	int chunk;
343 	dma_addr_t phys;
344 	u8 *pos;
345 	off_t offs;
346 
347 	chunk = idx / IOAT_DESCS_PER_2M;
348 	idx &= (IOAT_DESCS_PER_2M - 1);
349 	offs = idx * IOAT_DESC_SZ;
350 	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
351 	phys = ioat_chan->descs[chunk].hw + offs;
352 	hw = (struct ioat_dma_descriptor *)pos;
353 	memset(hw, 0, sizeof(*hw));
354 
355 	desc = kmem_cache_zalloc(ioat_cache, flags);
356 	if (!desc)
357 		return NULL;
358 
359 	dma_async_tx_descriptor_init(&desc->txd, chan);
360 	desc->txd.tx_submit = ioat_tx_submit_unlock;
361 	desc->hw = hw;
362 	desc->txd.phys = phys;
363 	return desc;
364 }
365 
366 void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
367 {
368 	kmem_cache_free(ioat_cache, desc);
369 }
370 
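/*
 * Ring geometry sketch: hardware descriptors live in 2MB coherent chunks,
 * IOAT_DESCS_PER_2M descriptors per chunk.  Assuming the 64-byte
 * IOAT_DESC_SZ from dma.h, a full order-16 ring (65536 entries) spans two
 * such chunks.
 */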
371 struct ioat_ring_ent **
372 ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
373 {
374 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
375 	struct ioat_ring_ent **ring;
376 	int total_descs = 1 << order;
377 	int i, chunks;
378 
379 	/* allocate the array to hold the software ring */
380 	ring = kcalloc(total_descs, sizeof(*ring), flags);
381 	if (!ring)
382 		return NULL;
383 
384 	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
385 
386 	for (i = 0; i < chunks; i++) {
387 		struct ioat_descs *descs = &ioat_chan->descs[i];
388 
389 		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
390 						 SZ_2M, &descs->hw, flags);
391 		if (!descs->virt) {
392 			int idx;
393 
394 			for (idx = 0; idx < i; idx++) {
395 				descs = &ioat_chan->descs[idx];
396 				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
397 						  descs->virt, descs->hw);
398 				descs->virt = NULL;
399 				descs->hw = 0;
400 			}
401 			ioat_chan->desc_chunks = 0;
402 			kfree(ring);
403 			return NULL;
404 		}
405 	}
406 
407 	for (i = 0; i < total_descs; i++) {
408 		ring[i] = ioat_alloc_ring_ent(c, i, flags);
409 		if (!ring[i]) {
410 			int idx;
411 
412 			while (i--)
413 				ioat_free_ring_ent(ring[i], c);
414 
415 			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
416 				dma_free_coherent(to_dev(ioat_chan),
417 						  SZ_2M,
418 						  ioat_chan->descs[idx].virt,
419 						  ioat_chan->descs[idx].hw);
420 				ioat_chan->descs[idx].virt = NULL;
421 				ioat_chan->descs[idx].hw = 0;
422 			}
423 
424 			ioat_chan->desc_chunks = 0;
425 			kfree(ring);
426 			return NULL;
427 		}
428 		set_desc_id(ring[i], i);
429 	}
430 
431 	/* link descs */
432 	for (i = 0; i < total_descs-1; i++) {
433 		struct ioat_ring_ent *next = ring[i+1];
434 		struct ioat_dma_descriptor *hw = ring[i]->hw;
435 
436 		hw->next = next->txd.phys;
437 	}
438 	ring[i]->hw->next = ring[0]->txd.phys;
439 
440 	return ring;
441 }
442 
443 /**
444  * ioat_check_space_lock - verify space and grab ring producer lock
445  * @ioat_chan: ioat channel (ring) to operate on
446  * @num_descs: allocation length
447  */
448 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
449 	__acquires(&ioat_chan->prep_lock)
450 {
451 	spin_lock_bh(&ioat_chan->prep_lock);
452 	/* Never allow the last descriptor to be consumed; we need at
453 	 * least one free at all times to allow for on-the-fly ring
454 	 * resizing.
455 	 */
456 	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
457 		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
458 			__func__, num_descs, ioat_chan->head,
459 			ioat_chan->tail, ioat_chan->issued);
460 		ioat_chan->produce = num_descs;
461 		return 0;  /* with ioat->prep_lock held */
462 	}
463 	spin_unlock_bh(&ioat_chan->prep_lock);
464 
465 	dev_dbg_ratelimited(to_dev(ioat_chan),
466 			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
467 			    __func__, num_descs, ioat_chan->head,
468 			    ioat_chan->tail, ioat_chan->issued);
469 
470 	/* Progress reclaim in the allocation failure case.  We may be
471 	 * called under bh_disabled, so we need to trigger the timer
472 	 * event directly.
473 	 */
474 	if (time_is_before_jiffies(ioat_chan->timer.expires)
475 	    && timer_pending(&ioat_chan->timer)) {
476 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
477 		ioat_timer_event(&ioat_chan->timer);
478 	}
479 
480 	return -ENOMEM;
481 }
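
/*
 * Typical producer flow (sketch): a prep routine calls
 * ioat_check_space_lock(ioat_chan, n) and, on success, fills the n ring
 * entries starting at ioat_chan->head while still holding prep_lock; the
 * lock is finally released by the descriptor's ->tx_submit() callback,
 * ioat_tx_submit_unlock(), which advances head and kicks the hardware if
 * enough work is pending.
 */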
482 
483 static bool desc_has_ext(struct ioat_ring_ent *desc)
484 {
485 	struct ioat_dma_descriptor *hw = desc->hw;
486 
487 	if (hw->ctl_f.op == IOAT_OP_XOR ||
488 	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
489 		struct ioat_xor_descriptor *xor = desc->xor;
490 
491 		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
492 			return true;
493 	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
494 		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
495 		struct ioat_pq_descriptor *pq = desc->pq;
496 
497 		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
498 			return true;
499 	}
500 
501 	return false;
502 }
503 
504 static void
505 ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
506 {
507 	if (!sed)
508 		return;
509 
510 	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
511 	kmem_cache_free(ioat_sed_cache, sed);
512 }
513 
514 static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
515 {
516 	u64 phys_complete;
517 	u64 completion;
518 
519 	completion = *ioat_chan->completion;
520 	phys_complete = ioat_chansts_to_addr(completion);
521 
522 	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
523 		(unsigned long long) phys_complete);
524 
525 	return phys_complete;
526 }
527 
528 static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
529 				   u64 *phys_complete)
530 {
531 	*phys_complete = ioat_get_current_completion(ioat_chan);
532 	if (*phys_complete == ioat_chan->last_completion)
533 		return false;
534 
535 	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
536 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
537 
538 	return true;
539 }
540 
541 static void
542 desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
543 {
544 	struct ioat_dma_descriptor *hw = desc->hw;
545 
546 	switch (hw->ctl_f.op) {
547 	case IOAT_OP_PQ_VAL:
548 	case IOAT_OP_PQ_VAL_16S:
549 	{
550 		struct ioat_pq_descriptor *pq = desc->pq;
551 
552 		/* check if there's error written */
553 		if (!pq->dwbes_f.wbes)
554 			return;
555 
556 		/* need to set a chanerr var for checking to clear later */
557 
558 		if (pq->dwbes_f.p_val_err)
559 			*desc->result |= SUM_CHECK_P_RESULT;
560 
561 		if (pq->dwbes_f.q_val_err)
562 			*desc->result |= SUM_CHECK_Q_RESULT;
563 
564 		return;
565 	}
566 	default:
567 		return;
568 	}
569 }
570 
571 /**
572  * __cleanup - reclaim used descriptors
573  * @ioat_chan: channel (ring) to clean
574  */
575 static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
576 {
577 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
578 	struct ioat_ring_ent *desc;
579 	bool seen_current = false;
580 	int idx = ioat_chan->tail, i;
581 	u16 active;
582 
583 	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
584 		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
585 
586 	/*
587 	 * At restart of the channel, the completion address and the
588 	 * channel status will be 0 due to starting a new chain. Since
589 	 * it's a new chain and the first descriptor "fails", there is
590 	 * nothing to clean up. We do not want to reap the entire submitted
591 	 * chain due to this 0 address value and then BUG.
592 	 */
593 	if (!phys_complete)
594 		return;
595 
596 	active = ioat_ring_active(ioat_chan);
597 	for (i = 0; i < active && !seen_current; i++) {
598 		struct dma_async_tx_descriptor *tx;
599 
600 		smp_read_barrier_depends();
601 		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
602 		desc = ioat_get_ring_ent(ioat_chan, idx + i);
603 		dump_desc_dbg(ioat_chan, desc);
604 
605 		/* set err stat if we are using dwbes */
606 		if (ioat_dma->cap & IOAT_CAP_DWBES)
607 			desc_get_errstat(ioat_chan, desc);
608 
609 		tx = &desc->txd;
610 		if (tx->cookie) {
611 			dma_cookie_complete(tx);
612 			dma_descriptor_unmap(tx);
613 			dmaengine_desc_get_callback_invoke(tx, NULL);
614 			tx->callback = NULL;
615 			tx->callback_result = NULL;
616 		}
617 
618 		if (tx->phys == phys_complete)
619 			seen_current = true;
620 
621 		/* skip extended descriptors */
622 		if (desc_has_ext(desc)) {
623 			BUG_ON(i + 1 >= active);
624 			i++;
625 		}
626 
627 		/* cleanup super extended descriptors */
628 		if (desc->sed) {
629 			ioat_free_sed(ioat_dma, desc->sed);
630 			desc->sed = NULL;
631 		}
632 	}
633 
634 	/* finish all descriptor reads before incrementing tail */
635 	smp_mb();
636 	ioat_chan->tail = idx + i;
637 	/* no active descs have written a completion? */
638 	BUG_ON(active && !seen_current);
639 	ioat_chan->last_completion = phys_complete;
640 
641 	if (active - i == 0) {
642 		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
643 			__func__);
644 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
645 	}
646 
647 	/* microsecond delay per pending descriptor, set via sysfs (intr_coalesce) */
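	/* e.g. intr_coalesce = 2 with 8 descriptors still outstanding programs
	 * a 16 microsecond delay, capped at IOAT_INTRDELAY_MASK.
	 */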
648 	if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
649 		writew(min((ioat_chan->intr_coalesce * (active - i)),
650 		       IOAT_INTRDELAY_MASK),
651 		       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
652 		ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
653 	}
654 }
655 
656 static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
657 {
658 	u64 phys_complete;
659 
660 	spin_lock_bh(&ioat_chan->cleanup_lock);
661 
662 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
663 		__cleanup(ioat_chan, phys_complete);
664 
665 	if (is_ioat_halted(*ioat_chan->completion)) {
666 		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
667 
668 		if (chanerr &
669 		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
670 			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
671 			ioat_eh(ioat_chan);
672 		}
673 	}
674 
675 	spin_unlock_bh(&ioat_chan->cleanup_lock);
676 }
677 
678 void ioat_cleanup_event(unsigned long data)
679 {
680 	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
681 
682 	ioat_cleanup(ioat_chan);
683 	if (!test_bit(IOAT_RUN, &ioat_chan->state))
684 		return;
685 	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
686 }
687 
688 static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
689 {
690 	u64 phys_complete;
691 
692 	ioat_quiesce(ioat_chan, 0);
693 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
694 		__cleanup(ioat_chan, phys_complete);
695 
696 	__ioat_restart_chan(ioat_chan);
697 }
698 
699 
700 static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
701 {
702 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
703 	struct ioat_ring_ent *desc;
704 	u16 active;
705 	int idx = ioat_chan->tail, i;
706 
707 	/*
708 	 * We assume that the failed descriptor has been processed.
709 	 * Now we are just returning all the remaining submitted
710 	 * descriptors to abort.
711 	 */
712 	active = ioat_ring_active(ioat_chan);
713 
714 	/* we skip the failed descriptor that tail points to */
715 	for (i = 1; i < active; i++) {
716 		struct dma_async_tx_descriptor *tx;
717 
718 		smp_read_barrier_depends();
719 		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
720 		desc = ioat_get_ring_ent(ioat_chan, idx + i);
721 
722 		tx = &desc->txd;
723 		if (tx->cookie) {
724 			struct dmaengine_result res;
725 
726 			dma_cookie_complete(tx);
727 			dma_descriptor_unmap(tx);
728 			res.result = DMA_TRANS_ABORTED;
729 			dmaengine_desc_get_callback_invoke(tx, &res);
730 			tx->callback = NULL;
731 			tx->callback_result = NULL;
732 		}
733 
734 		/* skip extended descriptors */
735 		if (desc_has_ext(desc)) {
736 			WARN_ON(i + 1 >= active);
737 			i++;
738 		}
739 
740 		/* cleanup super extended descriptors */
741 		if (desc->sed) {
742 			ioat_free_sed(ioat_dma, desc->sed);
743 			desc->sed = NULL;
744 		}
745 	}
746 
747 	smp_mb(); /* finish all descriptor reads before incrementing tail */
748 	ioat_chan->tail = idx + active;
749 
750 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
751 	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
752 }
753 
754 static void ioat_eh(struct ioatdma_chan *ioat_chan)
755 {
756 	struct pci_dev *pdev = to_pdev(ioat_chan);
757 	struct ioat_dma_descriptor *hw;
758 	struct dma_async_tx_descriptor *tx;
759 	u64 phys_complete;
760 	struct ioat_ring_ent *desc;
761 	u32 err_handled = 0;
762 	u32 chanerr_int;
763 	u32 chanerr;
764 	bool abort = false;
765 	struct dmaengine_result res;
766 
767 	/* cleanup so tail points to descriptor that caused the error */
768 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
769 		__cleanup(ioat_chan, phys_complete);
770 
771 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
772 	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
773 
774 	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
775 		__func__, chanerr, chanerr_int);
776 
777 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
778 	hw = desc->hw;
779 	dump_desc_dbg(ioat_chan, desc);
780 
781 	switch (hw->ctl_f.op) {
782 	case IOAT_OP_XOR_VAL:
783 		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
784 			*desc->result |= SUM_CHECK_P_RESULT;
785 			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
786 		}
787 		break;
788 	case IOAT_OP_PQ_VAL:
789 	case IOAT_OP_PQ_VAL_16S:
790 		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
791 			*desc->result |= SUM_CHECK_P_RESULT;
792 			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
793 		}
794 		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
795 			*desc->result |= SUM_CHECK_Q_RESULT;
796 			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
797 		}
798 		break;
799 	}
800 
801 	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
802 		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
803 			res.result = DMA_TRANS_READ_FAILED;
804 			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
805 		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
806 			res.result = DMA_TRANS_WRITE_FAILED;
807 			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
808 		}
809 
810 		abort = true;
811 	} else
812 		res.result = DMA_TRANS_NOERROR;
813 
814 	/* fault on unhandled error or spurious halt */
815 	if (chanerr ^ err_handled || chanerr == 0) {
816 		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
817 			__func__, chanerr, err_handled);
818 		dev_err(to_dev(ioat_chan), "Errors handled:\n");
819 		ioat_print_chanerrs(ioat_chan, err_handled);
820 		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
821 		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
822 
823 		BUG();
824 	}
825 
826 	/* cleanup the faulty descriptor since we are continuing */
827 	tx = &desc->txd;
828 	if (tx->cookie) {
829 		dma_cookie_complete(tx);
830 		dma_descriptor_unmap(tx);
831 		dmaengine_desc_get_callback_invoke(tx, &res);
832 		tx->callback = NULL;
833 		tx->callback_result = NULL;
834 	}
835 
836 	/* mark faulting descriptor as complete */
837 	*ioat_chan->completion = desc->txd.phys;
838 
839 	spin_lock_bh(&ioat_chan->prep_lock);
840 	/* we need to abort all descriptors */
841 	if (abort) {
842 		ioat_abort_descs(ioat_chan);
843 		/* clean up the channel, we could be in a weird state */
844 		ioat_reset_hw(ioat_chan);
845 	}
846 
847 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
848 	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
849 
850 	ioat_restart_channel(ioat_chan);
851 	spin_unlock_bh(&ioat_chan->prep_lock);
852 }
853 
854 static void check_active(struct ioatdma_chan *ioat_chan)
855 {
856 	if (ioat_ring_active(ioat_chan)) {
857 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
858 		return;
859 	}
860 
861 	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
862 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
863 }
864 
865 void ioat_timer_event(struct timer_list *t)
866 {
867 	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
868 	dma_addr_t phys_complete;
869 	u64 status;
870 
871 	status = ioat_chansts(ioat_chan);
872 
873 	/* when halted due to errors, check for channel
874 	 * programming errors before advancing the completion state
875 	 */
876 	if (is_ioat_halted(status)) {
877 		u32 chanerr;
878 
879 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
880 		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
881 			__func__, chanerr);
882 		dev_err(to_dev(ioat_chan), "Errors:\n");
883 		ioat_print_chanerrs(ioat_chan, chanerr);
884 
885 		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
886 			spin_lock_bh(&ioat_chan->cleanup_lock);
887 			spin_lock_bh(&ioat_chan->prep_lock);
888 			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
889 			spin_unlock_bh(&ioat_chan->prep_lock);
890 
891 			ioat_abort_descs(ioat_chan);
892 			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
893 			ioat_reset_hw(ioat_chan);
894 			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
895 			ioat_restart_channel(ioat_chan);
896 
897 			spin_lock_bh(&ioat_chan->prep_lock);
898 			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
899 			spin_unlock_bh(&ioat_chan->prep_lock);
900 			spin_unlock_bh(&ioat_chan->cleanup_lock);
901 		}
902 
903 		return;
904 	}
905 
906 	spin_lock_bh(&ioat_chan->cleanup_lock);
907 
908 	/* handle the no-actives case */
909 	if (!ioat_ring_active(ioat_chan)) {
910 		spin_lock_bh(&ioat_chan->prep_lock);
911 		check_active(ioat_chan);
912 		spin_unlock_bh(&ioat_chan->prep_lock);
913 		spin_unlock_bh(&ioat_chan->cleanup_lock);
914 		return;
915 	}
916 
917 	/* if we haven't made progress and we have already
918 	 * acknowledged a pending completion once, then be more
919 	 * forceful with a restart
920 	 */
921 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
922 		__cleanup(ioat_chan, phys_complete);
923 	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
924 		u32 chanerr;
925 
926 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
927 		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
928 			status, chanerr);
929 		dev_err(to_dev(ioat_chan), "Errors:\n");
930 		ioat_print_chanerrs(ioat_chan, chanerr);
931 
932 		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
933 			ioat_ring_active(ioat_chan));
934 
935 		spin_lock_bh(&ioat_chan->prep_lock);
936 		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
937 		spin_unlock_bh(&ioat_chan->prep_lock);
938 
939 		ioat_abort_descs(ioat_chan);
940 		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
941 		ioat_reset_hw(ioat_chan);
942 		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
943 		ioat_restart_channel(ioat_chan);
944 
945 		spin_lock_bh(&ioat_chan->prep_lock);
946 		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
947 		spin_unlock_bh(&ioat_chan->prep_lock);
948 		spin_unlock_bh(&ioat_chan->cleanup_lock);
949 		return;
950 	} else
951 		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
952 
953 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
954 	spin_unlock_bh(&ioat_chan->cleanup_lock);
955 }
956 
957 enum dma_status
958 ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
959 		struct dma_tx_state *txstate)
960 {
961 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
962 	enum dma_status ret;
963 
964 	ret = dma_cookie_status(c, cookie, txstate);
965 	if (ret == DMA_COMPLETE)
966 		return ret;
967 
968 	ioat_cleanup(ioat_chan);
969 
970 	return dma_cookie_status(c, cookie, txstate);
971 }
972 
973 int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
974 {
975 	/* throw away whatever the channel was doing and get it
976 	 * initialized, with ioat3 specific workarounds
977 	 */
978 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
979 	struct pci_dev *pdev = ioat_dma->pdev;
980 	u32 chanerr;
981 	u16 dev_id;
982 	int err;
983 
984 	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
985 
986 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
987 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
988 
989 	if (ioat_dma->version < IOAT_VER_3_3) {
990 		/* clear any pending errors */
991 		err = pci_read_config_dword(pdev,
992 				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
993 		if (err) {
994 			dev_err(&pdev->dev,
995 				"channel error register unreachable\n");
996 			return err;
997 		}
998 		pci_write_config_dword(pdev,
999 				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
1000 
1001 		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1002 		 * (workaround for spurious config parity error after restart)
1003 		 */
1004 		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1005 		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
1006 			pci_write_config_dword(pdev,
1007 					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
1008 					       0x10);
1009 		}
1010 	}
1011 
1012 	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
1013 		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
1014 		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
1015 		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
1016 	}
1017 
1018 
1019 	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
1020 	if (!err) {
1021 		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
1022 			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
1023 			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
1024 			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
1025 		}
1026 	}
1027 
1028 	if (err)
1029 		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
1030 
1031 	return err;
1032 }
1033