xref: /linux/drivers/dma/pch_dma.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"

#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3

#define MAX_CHAN_NR			12

#define DMA_MASK_CTL0_MODE	0x33333333
#define DMA_MASK_CTL2_MODE	0x00003333

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_transfer_direction dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};

#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct pci_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

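/*
 * Enable or disable the completion interrupt for @chan in the CTL2
 * register. Channels 0-7 use bits 0-7, channels 8-11 use bits 16-19.
 */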
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

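/*
 * Program the channel's transfer direction bit in CTL0 (channels 0-7)
 * or CTL3 (channels 8-11) according to pd_chan->dir, leaving the other
 * channels' bits untouched.
 */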
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_mode;
	u32 mask_ctl;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
					(DMA_CTL0_BITS_PER_CH * chan->chan_id);
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
				       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		val = dma_readl(pd, CTL3);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
						(DMA_CTL0_BITS_PER_CH * ch);
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

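/*
 * Program the channel's transfer mode (disable, scatter/gather or
 * one-shot) in CTL0 (channels 0-7) or CTL3 (channels 8-11) without
 * disturbing the other channels.
 */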
static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_ctl;
	u32 mask_dir;

	if (chan->chan_id < 8) {
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL0);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
						 (DMA_CTL0_BITS_PER_CH * ch));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL3);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

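/* Return true if the channel's status field in STS0/STS2 reads DMA_STATUS_IDLE. */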
static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	if (sts == DMA_STATUS_IDLE)
		return true;
	else
		return false;
}

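/*
 * Start @desc on an idle channel: a lone descriptor is written straight
 * into the channel registers and run in one-shot mode, a chained list is
 * started in scatter/gather mode via the NEXT register.
 */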
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

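/*
 * Return a completed descriptor and its chain to the free list and run
 * its completion callback. Called with the channel lock held.
 */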
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	if (callback)
		callback(param);
}

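/*
 * Complete every descriptor on the active list, promote the queued
 * descriptors to the active list and restart the channel if any work
 * was queued. The channel must be idle.
 */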
static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

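/*
 * Drop the failing descriptor from the active list, splice the queue
 * behind the remaining active work, restart the channel and complete
 * the bad descriptor.
 */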
static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

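/*
 * Called when the channel has gone idle: either complete everything
 * (nothing or only one descriptor active) or retire the finished
 * descriptor and start the next one.
 */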
static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
		list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

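/*
 * dmaengine tx_submit hook: assign a cookie and start the descriptor
 * at once if the active list is empty, otherwise append it to the queue.
 */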
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock(&pd_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = pci_pool_alloc(pd->pool, flags, &addr);
	if (desc) {
		memset(desc, 0, sizeof(struct pch_dma_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

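/*
 * Take an ACKed descriptor from the channel's free list, falling back
 * to allocating a fresh one from the PCI pool if none is available.
 */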
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

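/*
 * dmaengine hook: preallocate init_nr_desc_per_channel descriptors for
 * the channel and enable its interrupt.
 */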
static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				"Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	dma_cookie_init(chan);
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		pci_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

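/*
 * dmaengine hook: build a descriptor chain for a slave scatter/gather
 * transfer, encoding the device register address, transfer width and
 * per-descriptor count limit into each descriptor.
 */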
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction direction, unsigned long flags,
			void *context)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_MEM_TO_DEV)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_dma_address(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

static int pd_device_terminate_all(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}

static void pdc_tasklet(unsigned long data)
{
	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

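/*
 * Shared interrupt handler: scan STS0/STS2, latch any error status and
 * schedule the per-channel tasklet for every channel that raised an
 * interrupt, then acknowledge the handled status bits.
 */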
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}

#ifdef CONFIG_PM
static void pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}
#endif

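/*
 * PCI probe: map BAR 1, request the shared IRQ, create the descriptor
 * pool and register one dmaengine channel per hardware channel.
 */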
static int pch_dma_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	pd->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_terminate_all = pd_device_terminate_all;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	pci_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}

static void pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		free_irq(pdev->irq, pd);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_kill(&pd_chan->tasklet);
		}

		pci_pool_destroy(pd->pool);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_VENDOR_ID_ROHM		0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815

static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
	{ 0, },
};

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= pch_dma_remove,
#ifdef CONFIG_PM
	.suspend	= pch_dma_suspend,
	.resume		= pch_dma_resume,
#endif
};

module_pci_driver(pch_dma_driver);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);