/*
 * Core driver for the Intel integrated DMA 64-bit
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "idma64.h"

/* Platform driver name */
#define DRV_NAME		"idma64"

/* For now we support only two channels */
#define IDMA64_NR_CHAN		2

/* ---------------------------------------------------------------------- */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

/* ---------------------------------------------------------------------- */

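/*
 * Disable the controller: clear the global enable bit, mask all channel
 * interrupts and poll (with a bounded retry count) until the hardware
 * reports it is off.
 */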
static void idma64_off(struct idma64 *idma64)
{
	unsigned short count = 100;

	dma_writel(idma64, CFG, 0);

	channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);

	do {
		cpu_relax();
	} while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count);
}

static void idma64_on(struct idma64 *idma64)
{
	dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN);
}

/* ---------------------------------------------------------------------- */

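/*
 * Prepare a channel for a new transfer: program CFG_LO / CFG_HI with the
 * default FIFO-drain and burst-alignment settings, unmask the channel's
 * transfer-complete and error interrupts and make sure the controller
 * itself is enabled.
 */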
static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
	u32 cfglo = 0;

	/* Enforce FIFO drain when channel is suspended */
	cfglo |= IDMA64C_CFGL_CH_DRAIN;

	/* Set default burst alignment */
	cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;

	channel_writel(idma64c, CFG_LO, cfglo);
	channel_writel(idma64c, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(idma64, MASK(XFER), idma64c->mask);
	channel_set_bit(idma64, MASK(ERROR), idma64c->mask);

	/*
	 * Make sure the controller is turned on.
	 *
	 * The iDMA is turned off in ->probe() and loses context during a
	 * system suspend / resume cycle. That's why we have to enable it
	 * each time we use it.
	 */
	idma64_on(idma64);
}

static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	channel_clear_bit(idma64, CH_EN, idma64c->mask);
}

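/*
 * Program and enable the channel for the descriptor in idma64c->desc. The
 * whole transfer is described by the linked list of hardware descriptors,
 * so SAR/DAR are left at zero and only the address of the first LLI is
 * written, with LLP fetching enabled for both source and destination.
 */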
static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw = &desc->hw[0];

	channel_writeq(idma64c, SAR, 0);
	channel_writeq(idma64c, DAR, 0);

	channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL));
	channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);

	channel_writeq(idma64c, LLP, hw->llp);

	channel_set_bit(idma64, CH_EN, idma64c->mask);
}

static void idma64_stop_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);

	idma64_chan_stop(idma64, idma64c);
}

static void idma64_start_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&idma64c->vchan);
	if (!vdesc) {
		idma64c->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	idma64c->desc = to_idma64_desc(vdesc);

	/* Configure the channel */
	idma64_chan_init(idma64, idma64c);

	/* Start the channel with a new descriptor */
	idma64_chan_start(idma64, idma64c);
}

/* ---------------------------------------------------------------------- */

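/*
 * Per-channel interrupt handling: an error clears the ERROR status and
 * marks the active descriptor failed; a completed transfer clears the XFER
 * status, completes the cookie and starts the next queued descriptor. The
 * channel is stopped when nothing is left to run or an error has occurred.
 */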
static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
		u32 status_err, u32 status_xfer)
{
	struct idma64_chan *idma64c = &idma64->chan[c];
	struct idma64_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	desc = idma64c->desc;
	if (desc) {
		if (status_err & (1 << c)) {
			dma_writel(idma64, CLEAR(ERROR), idma64c->mask);
			desc->status = DMA_ERROR;
		} else if (status_xfer & (1 << c)) {
			dma_writel(idma64, CLEAR(XFER), idma64c->mask);
			desc->status = DMA_COMPLETE;
			vchan_cookie_complete(&desc->vdesc);
			idma64_start_transfer(idma64c);
		}

		/* idma64_start_transfer() updates idma64c->desc */
		if (idma64c->desc == NULL || desc->status == DMA_ERROR)
			idma64_stop_transfer(idma64c);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
}

static irqreturn_t idma64_irq(int irq, void *dev)
{
	struct idma64 *idma64 = dev;
	u32 status = dma_readl(idma64, STATUS_INT);
	u32 status_xfer;
	u32 status_err;
	unsigned short i;

	dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);

	/* Check if we have any interrupt from the DMA controller */
	if (!status)
		return IRQ_NONE;

	/* Disable interrupts */
	channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);

	status_xfer = dma_readl(idma64, RAW(XFER));
	status_err = dma_readl(idma64, RAW(ERROR));

	for (i = 0; i < idma64->dma.chancnt; i++)
		idma64_chan_irq(idma64, i, status_err, status_xfer);

	/* Re-enable interrupts */
	channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask);
	channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask);

	return IRQ_HANDLED;
}

/* ---------------------------------------------------------------------- */

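/*
 * Descriptor handling. Allocations below use GFP_NOWAIT because the prep
 * callback may be invoked from non-sleeping (atomic) context.
 */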
static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc)
{
	struct idma64_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT);
	if (!desc->hw) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void idma64_desc_free(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct idma64_hw_desc *hw;

	if (desc->ndesc) {
		unsigned int i = desc->ndesc;

		do {
			hw = &desc->hw[--i];
			dma_pool_free(idma64c->pool, hw->lli, hw->llp);
		} while (i);
	}

	kfree(desc->hw);
	kfree(desc);
}

static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
{
	struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan);

	idma64_desc_free(idma64c, to_idma64_desc(vdesc));
}

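/*
 * Fill one hardware descriptor (LLI) for a single block. The peripheral
 * side uses the width configured by the client; the memory side width is
 * derived from the alignment of the buffer address and length, capped at
 * 4 bytes. Returns the DMA address of this LLI so that the block preceding
 * it in the chain can link to it.
 */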
static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
		struct dma_slave_config *config,
		enum dma_transfer_direction direction, u64 llp)
{
	struct idma64_lli *lli = hw->lli;
	u64 sar, dar;
	u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len);
	u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;
	u32 src_width, dst_width;

	if (direction == DMA_MEM_TO_DEV) {
		sar = hw->phys;
		dar = config->dst_addr;
		ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
			 IDMA64C_CTLL_FC_M2P;
		src_width = min_t(u32, 2, __fls(sar | hw->len));
		dst_width = __fls(config->dst_addr_width);
	} else {	/* DMA_DEV_TO_MEM */
		sar = config->src_addr;
		dar = hw->phys;
		ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
			 IDMA64C_CTLL_FC_P2M;
		src_width = __fls(config->src_addr_width);
		dst_width = min_t(u32, 2, __fls(dar | hw->len));
	}

	lli->sar = sar;
	lli->dar = dar;

	lli->ctlhi = ctlhi;
	lli->ctllo = ctllo |
		     IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) |
		     IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) |
		     IDMA64C_CTLL_DST_WIDTH(dst_width) |
		     IDMA64C_CTLL_SRC_WIDTH(src_width);

	lli->llp = llp;
	return hw->llp;
}

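/*
 * Blocks are filled last to first so that each LLI can be pointed at the
 * already known address of the block that follows it; the final block also
 * raises an interrupt on completion.
 */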
static void idma64_desc_fill(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct dma_slave_config *config = &idma64c->config;
	struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1];
	struct idma64_lli *lli = hw->lli;
	u64 llp = 0;
	unsigned int i = desc->ndesc;

	/* Fill the hardware descriptors and link them to a list */
	do {
		hw = &desc->hw[--i];
		llp = idma64_hw_desc_fill(hw, config, desc->direction, llp);
		desc->length += hw->len;
	} while (i);

	/* Trigger interrupt after last block */
	lli->ctllo |= IDMA64C_CTLL_INT_EN;
}

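/*
 * For reference, a typical dmaengine client is expected to drive one of
 * these channels roughly as follows (a sketch only; error handling is
 * omitted and "chan", "sgl", "nents" and "fifo_phys_addr" are placeholders
 * for the client's channel, mapped scatterlist and peripheral FIFO
 * address):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */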
static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct idma64_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = idma64_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct idma64_hw_desc *hw = &desc->hw[i];

		/* Allocate DMA capable memory for hardware descriptor */
		hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
		if (!hw->lli) {
			desc->ndesc = i;
			idma64_desc_free(idma64c, desc);
			return NULL;
		}

		hw->phys = sg_dma_address(sg);
		hw->len = sg_dma_len(sg);
	}

	desc->ndesc = sg_len;
	desc->direction = direction;
	desc->status = DMA_IN_PROGRESS;

	idma64_desc_fill(idma64c, desc);
	return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
}

static void idma64_issue_pending(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)
		idma64_start_transfer(idma64c);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
}

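/*
 * Residue of the descriptor currently on the hardware: the lengths of the
 * blocks already completed (found by matching the channel's LLP register
 * against the LLI addresses) and the portion of the current block already
 * transferred (CTL_HI.BLOCK_TS) are subtracted from the total length.
 */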
static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw;
	size_t bytes = desc->length;
	u64 llp = channel_readq(idma64c, LLP);
	u32 ctlhi = channel_readl(idma64c, CTL_HI);
	unsigned int i = 0;

	do {
		hw = &desc->hw[i];
		if (hw->llp == llp)
			break;
		bytes -= hw->len;
	} while (++i < desc->ndesc);

	if (!i)
		return bytes;

	/* The current chunk is not fully transferred yet */
	bytes += desc->hw[--i].len;

	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
}

static enum dma_status idma64_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	vdesc = vchan_find_desc(&idma64c->vchan, cookie);
	if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
		bytes = idma64_active_desc_size(idma64c);
		dma_set_residue(state, bytes);
		status = idma64c->desc->status;
	} else if (vdesc) {
		bytes = to_idma64_desc(vdesc)->length;
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return status;
}

static void convert_burst(u32 *maxburst)
{
	if (*maxburst)
		*maxburst = __fls(*maxburst);
	else
		*maxburst = 0;
}

static int idma64_slave_config(struct dma_chan *chan,
		struct dma_slave_config *config)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(config->direction))
		return -EINVAL;

	memcpy(&idma64c->config, config, sizeof(idma64c->config));

	convert_burst(&idma64c->config.src_maxburst);
	convert_burst(&idma64c->config.dst_maxburst);

	return 0;
}

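/*
 * Suspend the channel and give its FIFO up to roughly 100 microseconds to
 * drain before the caller disables or reprograms it.
 */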
static void idma64_chan_deactivate(struct idma64_chan *idma64c)
{
	unsigned short count = 100;
	u32 cfglo;

	cfglo = channel_readl(idma64c, CFG_LO);
	channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
	do {
		udelay(1);
		cfglo = channel_readl(idma64c, CFG_LO);
	} while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
}

static void idma64_chan_activate(struct idma64_chan *idma64c)
{
	u32 cfglo;

	cfglo = channel_readl(idma64c, CFG_LO);
	channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
}

static int idma64_pause(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
		idma64_chan_deactivate(idma64c);
		idma64c->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}

static int idma64_resume(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
		idma64c->desc->status = DMA_IN_PROGRESS;
		idma64_chan_activate(idma64c);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}

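/*
 * Abort all work on the channel: suspend and drain it, disable it, drop
 * the descriptor that was in flight and free everything still queued on
 * the virtual channel.
 */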
static int idma64_terminate_all(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	idma64_chan_deactivate(idma64c);
	idma64_stop_transfer(idma64c);
	if (idma64c->desc) {
		idma64_vdesc_free(&idma64c->desc->vdesc);
		idma64c->desc = NULL;
	}
	vchan_get_all_descriptors(&idma64c->vchan, &head);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	vchan_dma_desc_free_list(&idma64c->vchan, &head);
	return 0;
}

static int idma64_alloc_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
					chan->device->dev,
					sizeof(struct idma64_lli), 8, 0);
	if (!idma64c->pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}

	return 0;
}

static void idma64_free_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	vchan_free_chan_resources(to_virt_chan(chan));
	dma_pool_destroy(idma64c->pool);
	idma64c->pool = NULL;
}

/* ---------------------------------------------------------------------- */

#define IDMA64_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

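/*
 * Set up the controller: allocate per-channel state, turn the hardware
 * off, install the shared interrupt handler and register a slave-only
 * (DMA_SLAVE | DMA_PRIVATE) dmaengine device.
 */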
static int idma64_probe(struct idma64_chip *chip)
{
	struct idma64 *idma64;
	unsigned short nr_chan = IDMA64_NR_CHAN;
	unsigned short i;
	int ret;

	idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
	if (!idma64)
		return -ENOMEM;

	idma64->regs = chip->regs;
	chip->idma64 = idma64;

	idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
				    GFP_KERNEL);
	if (!idma64->chan)
		return -ENOMEM;

	idma64->all_chan_mask = (1 << nr_chan) - 1;

	/* Turn off iDMA controller */
	idma64_off(idma64);

	ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
			       dev_name(chip->dev), idma64);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&idma64->dma.channels);
	for (i = 0; i < nr_chan; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		idma64c->vchan.desc_free = idma64_vdesc_free;
		vchan_init(&idma64c->vchan, &idma64->dma);

		idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
		idma64c->mask = BIT(i);
	}

	dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);

	idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
	idma64->dma.device_free_chan_resources = idma64_free_chan_resources;

	idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;

	idma64->dma.device_issue_pending = idma64_issue_pending;
	idma64->dma.device_tx_status = idma64_tx_status;

	idma64->dma.device_config = idma64_slave_config;
	idma64->dma.device_pause = idma64_pause;
	idma64->dma.device_resume = idma64_resume;
	idma64->dma.device_terminate_all = idma64_terminate_all;

	idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	idma64->dma.dev = chip->dev;

	ret = dma_async_device_register(&idma64->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
	return 0;
}

static int idma64_remove(struct idma64_chip *chip)
{
	struct idma64 *idma64 = chip->idma64;
	unsigned short i;

	dma_async_device_unregister(&idma64->dma);

	/*
	 * Explicitly call devm_free_irq() here so that the interrupt handler
	 * can no longer schedule tasklets while the channels are torn down.
	 */
	devm_free_irq(chip->dev, chip->irq, idma64);

	for (i = 0; i < idma64->dma.chancnt; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		tasklet_kill(&idma64c->vchan.task);
	}

	return 0;
}

/* ---------------------------------------------------------------------- */

static int idma64_platform_probe(struct platform_device *pdev)
{
	struct idma64_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	int ret;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	chip->dev = dev;

	ret = idma64_probe(chip);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, chip);
	return 0;
}

static int idma64_platform_remove(struct platform_device *pdev)
{
	struct idma64_chip *chip = platform_get_drvdata(pdev);

	return idma64_remove(chip);
}

#ifdef CONFIG_PM_SLEEP

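/*
 * The controller loses its register context across system sleep, so
 * suspend simply turns it off; resume turns it back on and each channel is
 * fully reprogrammed by idma64_chan_init() before its next transfer.
 */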
static int idma64_pm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct idma64_chip *chip = platform_get_drvdata(pdev);

	idma64_off(chip->idma64);
	return 0;
}

static int idma64_pm_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct idma64_chip *chip = platform_get_drvdata(pdev);

	idma64_on(chip->idma64);
	return 0;
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops idma64_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
};

static struct platform_driver idma64_platform_driver = {
	.probe		= idma64_platform_probe,
	.remove		= idma64_platform_remove,
	.driver = {
		.name	= DRV_NAME,
		.pm	= &idma64_dev_pm_ops,
	},
};

module_platform_driver(idma64_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("iDMA64 core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_ALIAS("platform:" DRV_NAME);