/*
 * Core driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA channel allocation:
 * 1. Even-numbered channels are used for DMA Read (UART TX), odd-numbered
 *    channels for DMA Write (UART RX).
 * 2. Channels 0/1 are assigned to port 0, channels 2/3 to port 1, channels
 *    4/5 to port 2, and so on.
 */
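
/*
 * Illustrative mapping implied by the rule above (not code used by this
 * driver): for 0-based UART port p,
 *	TX (DMA Read)  -> channel 2 * p
 *	RX (DMA Write) -> channel 2 * p + 1
 */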

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hsu.h"

#define HSU_DMA_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	|	\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_3_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)

static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}

static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
{
	u32 cr = HSU_CH_CR_CHA;

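	/*
	 * HSU_CH_CR_CHD encodes the transfer direction: it is cleared for
	 * DMA Read (memory to device, UART TX) and set for DMA Write (device
	 * to memory, UART RX).
	 */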
	if (hsuc->direction == DMA_MEM_TO_DEV)
		cr &= ~HSU_CH_CR_CHD;
	else if (hsuc->direction == DMA_DEV_TO_MEM)
		cr |= HSU_CH_CR_CHD;

	hsu_chan_writel(hsuc, HSU_CH_CR, cr);
}

static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
{
	struct dma_slave_config *config = &hsuc->config;
	struct hsu_dma_desc *desc = hsuc->desc;
	u32 bsr = 0, mtsr = 0;	/* to shut the compiler up */
	u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
	unsigned int i, count;

	if (hsuc->direction == DMA_MEM_TO_DEV) {
		bsr = config->dst_maxburst;
		mtsr = config->src_addr_width;
	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
		bsr = config->src_maxburst;
		mtsr = config->dst_addr_width;
	}

	hsu_chan_disable(hsuc);

	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
	hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);

	/*
	 * Set descriptors. The hardware takes at most HSU_DMA_CHAN_NR_DESC
	 * descriptors at a time; a longer scatterlist is programmed in
	 * batches, with desc->active recording progress so the IRQ handler
	 * can restart the channel for the remaining entries.
	 */
	count = desc->nents - desc->active;
	for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) {
		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);

		/* Prepare value for DCR */
		dcr |= HSU_CH_DCR_DESCA(i);
		dcr |= HSU_CH_DCR_CHTOI(i);	/* timeout bit, see HSU Errata 1 */

		desc->active++;
	}
	/* Only for the last descriptor in the chain */
	dcr |= HSU_CH_DCR_CHSOD(count - 1);
	dcr |= HSU_CH_DCR_CHDI(count - 1);

	hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);

	hsu_chan_enable(hsuc);
}

static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
	hsu_chan_disable(hsuc);
	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
}

static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
	hsu_dma_chan_start(hsuc);
}

static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
{
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&hsuc->vchan);
	if (!vdesc) {
		hsuc->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	hsuc->desc = to_hsu_dma_desc(vdesc);

	/* Start the channel with a new descriptor */
	hsu_dma_start_channel(hsuc);
}

/*
 *      hsu_dma_get_status() - get DMA channel status
 *      @chip: HSUART DMA chip
 *      @nr: DMA channel number
 *      @status: pointer for DMA Channel Status Register value
 *
 *      Description:
 *      The function reads and clears the DMA Channel Status Register, checks
 *      if it was a timeout interrupt and returns a corresponding value.
 *
 *      Caller should provide a valid pointer for the DMA Channel Status
 *      Register value that will be returned in @status.
 *
 *      Return:
 *      1 for DMA timeout status, 0 for other DMA status, or a negative error
 *      code for invalid parameters or when no interrupt is pending.
 */
int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
		       u32 *status)
{
	struct hsu_dma_chan *hsuc;
	unsigned long flags;
	u32 sr;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return -EINVAL;

	hsuc = &chip->hsu->chan[nr];

	/*
	 * Regardless of the situation, the IRQ status register must be read
	 * to clear it. There is a hardware bug, see Errata 5, HSD 2900918.
	 */
	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	/* Check if any interrupt is pending */
	sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
	if (!sr)
		return -EIO;

	/* Timeout IRQ: need to wait some time, see Errata 2 */
	if (sr & HSU_CH_SR_DESCTO_ANY)
		udelay(2);

	/*
	 * At this point, at least one of the Descriptor Time Out, Channel
	 * Error or Descriptor Done bits must be set. Clear the Descriptor
	 * Time Out bits; if sr is still non-zero, it must be a channel error
	 * or descriptor done, which have higher priority than a timeout and
	 * are handled in hsu_dma_do_irq(). Otherwise, it was a timeout.
	 */
	sr &= ~HSU_CH_SR_DESCTO_ANY;

	*status = sr;

	return sr ? 0 : 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_get_status);

/*
 *      hsu_dma_do_irq() - DMA interrupt handler
 *      @chip: HSUART DMA chip
 *      @nr: DMA channel number
 *      @status: Channel Status Register value
 *
 *      Description:
 *      This function handles Channel Error and Descriptor Done interrupts.
 *      It should be called after determining that the DMA interrupt is not a
 *      normal timeout interrupt, i.e. hsu_dma_get_status() returned 0.
 *
 *      Return:
 *      IRQ_NONE for invalid channel number, IRQ_HANDLED otherwise.
 */
irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
			   u32 status)
{
	struct hsu_dma_chan *hsuc;
	struct hsu_dma_desc *desc;
	unsigned long flags;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return IRQ_NONE;

	hsuc = &chip->hsu->chan[nr];

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	desc = hsuc->desc;
	if (desc) {
		if (status & HSU_CH_SR_CHE) {
			desc->status = DMA_ERROR;
		} else if (desc->active < desc->nents) {
			hsu_dma_start_channel(hsuc);
		} else {
			vchan_cookie_complete(&desc->vdesc);
			desc->status = DMA_COMPLETE;
			hsu_dma_start_transfer(hsuc);
		}
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
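
/*
 * Typical use of the two exported helpers from a glue driver's interrupt
 * handler (a sketch; how the chip pointer and channel number are obtained
 * is driver specific):
 *
 *	u32 status;
 *	int ret;
 *
 *	ret = hsu_dma_get_status(chip, nr, &status);
 *	if (ret < 0)
 *		return IRQ_NONE;
 *	if (ret > 0)
 *		return IRQ_HANDLED;	(timeout: handled outside the DMA
 *					 core, e.g. by the UART driver)
 *	return hsu_dma_do_irq(chip, nr, status);
 */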

static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
	struct hsu_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
	if (!desc->sg) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

	kfree(desc->sg);
	kfree(desc);
}

static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct hsu_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = hsu_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc->sg[i].addr = sg_dma_address(sg);
		desc->sg[i].len = sg_dma_len(sg);

		desc->length += sg_dma_len(sg);
	}

	desc->nents = sg_len;
	desc->direction = direction;
	/* desc->active = 0 by kzalloc */
	desc->status = DMA_IN_PROGRESS;

	return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
}

static void hsu_dma_issue_pending(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
		hsu_dma_start_transfer(hsuc);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}

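/*
 * A dmaengine client drives the operations above through the generic API;
 * a minimal sketch for UART TX (channel request and error handling omitted,
 * values illustrative):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_dma_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst = 16,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */
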
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
	struct hsu_dma_desc *desc = hsuc->desc;
	size_t bytes = 0;
	int i;

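	/* Sum the scatterlist entries not yet submitted to the hardware */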
	for (i = desc->active; i < desc->nents; i++)
		bytes += desc->sg[i].len;

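	/*
	 * Add what is left in the hardware descriptors; reading DxTSR is
	 * assumed to return the remaining transfer size of descriptor i.
	 */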
	i = HSU_DMA_CHAN_NR_DESC - 1;
	do {
		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
	} while (--i >= 0);

	return bytes;
}

static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
		bytes = hsu_dma_active_desc_size(hsuc);
		dma_set_residue(state, bytes);
		status = hsuc->desc->status;
	} else if (vdesc) {
		bytes = to_hsu_dma_desc(vdesc)->length;
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return status;
}

static int hsu_dma_slave_config(struct dma_chan *chan,
				struct dma_slave_config *config)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(config->direction))
		return -EINVAL;

	memcpy(&hsuc->config, config, sizeof(hsuc->config));

	return 0;
}

static int hsu_dma_pause(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
		hsu_chan_disable(hsuc);
		hsuc->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_resume(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
		hsuc->desc->status = DMA_IN_PROGRESS;
		hsu_chan_enable(hsuc);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_terminate_all(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&hsuc->vchan.lock, flags);

	hsu_dma_stop_channel(hsuc);
	if (hsuc->desc) {
		hsu_dma_desc_free(&hsuc->desc->vdesc);
		hsuc->desc = NULL;
	}

	vchan_get_all_descriptors(&hsuc->vchan, &head);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
	vchan_dma_desc_free_list(&hsuc->vchan, &head);

	return 0;
}

static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

int hsu_dma_probe(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu;
	void __iomem *addr = chip->regs + chip->offset;
	unsigned short i;
	int ret;

	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
	if (!hsu)
		return -ENOMEM;

	chip->hsu = hsu;

	/* Calculate nr_channels from the IO space length */
	hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH;

	hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels,
				 sizeof(*hsu->chan), GFP_KERNEL);
	if (!hsu->chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&hsu->dma.channels);
	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		hsuc->vchan.desc_free = hsu_dma_desc_free;
		vchan_init(&hsuc->vchan, &hsu->dma);

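		/* See the channel allocation rule at the top of this file */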
		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
	}

	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
	hsu->dma.device_tx_status = hsu_dma_tx_status;

	hsu->dma.device_config = hsu_dma_slave_config;
	hsu->dma.device_pause = hsu_dma_pause;
	hsu->dma.device_resume = hsu_dma_resume;
	hsu->dma.device_terminate_all = hsu_dma_terminate_all;

	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	hsu->dma.dev = chip->dev;

	dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK);

	ret = dma_async_device_register(&hsu->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels);
	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_probe);
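
/*
 * A glue driver (e.g. PCI or platform) is expected to fill in the chip
 * structure before calling hsu_dma_probe(); a sketch with illustrative
 * values:
 *
 *	chip->dev = &pdev->dev;
 *	chip->regs = ...;	(ioremapped MMIO base of the device)
 *	chip->offset = 0;	(offset of the DMA registers)
 *	chip->length = resource_size(res);
 *
 *	ret = hsu_dma_probe(chip);
 */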

int hsu_dma_remove(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu = chip->hsu;
	unsigned short i;

	dma_async_device_unregister(&hsu->dma);

	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		tasklet_kill(&hsuc->vchan.task);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("High Speed UART DMA core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");