1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * DMA driver for NVIDIA Tegra GPC DMA controller.
4  *
5  * Copyright (c) 2014-2022, NVIDIA CORPORATION.  All rights reserved.
6  */
7 
8 #include <linux/bitfield.h>
9 #include <linux/dmaengine.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/interrupt.h>
12 #include <linux/iommu.h>
13 #include <linux/iopoll.h>
14 #include <linux/minmax.h>
15 #include <linux/module.h>
16 #include <linux/of.h>
17 #include <linux/of_dma.h>
18 #include <linux/platform_device.h>
19 #include <linux/reset.h>
20 #include <linux/slab.h>
21 #include <dt-bindings/memory/tegra186-mc.h>
22 #include "virt-dma.h"
23 
24 /* CSR register */
25 #define TEGRA_GPCDMA_CHAN_CSR			0x00
26 #define TEGRA_GPCDMA_CSR_ENB			BIT(31)
27 #define TEGRA_GPCDMA_CSR_IE_EOC			BIT(30)
28 #define TEGRA_GPCDMA_CSR_ONCE			BIT(27)
29 
30 #define TEGRA_GPCDMA_CSR_FC_MODE		GENMASK(25, 24)
31 #define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO	\
32 		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0)
33 #define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO	\
34 		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1)
35 #define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO	\
36 		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2)
37 #define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO	\
38 		FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3)
39 
40 #define TEGRA_GPCDMA_CSR_DMA			GENMASK(23, 21)
41 #define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC	\
42 		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0)
43 #define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC		\
44 		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1)
45 #define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC	\
46 		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2)
47 #define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC		\
48 		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3)
49 #define TEGRA_GPCDMA_CSR_DMA_MEM2MEM		\
50 		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4)
51 #define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT		\
52 		FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6)
53 
54 #define TEGRA_GPCDMA_CSR_REQ_SEL_MASK		GENMASK(20, 16)
55 #define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED		\
56 					FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4)
57 #define TEGRA_GPCDMA_CSR_IRQ_MASK		BIT(15)
58 #define TEGRA_GPCDMA_CSR_WEIGHT			GENMASK(13, 10)
59 
60 /* STATUS register */
61 #define TEGRA_GPCDMA_CHAN_STATUS		0x004
62 #define TEGRA_GPCDMA_STATUS_BUSY		BIT(31)
63 #define TEGRA_GPCDMA_STATUS_ISE_EOC		BIT(30)
64 #define TEGRA_GPCDMA_STATUS_PING_PONG		BIT(28)
65 #define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY	BIT(27)
66 #define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE	BIT(26)
67 #define TEGRA_GPCDMA_STATUS_CHANNEL_RX		BIT(25)
68 #define TEGRA_GPCDMA_STATUS_CHANNEL_TX		BIT(24)
69 #define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA	BIT(23)
70 #define TEGRA_GPCDMA_STATUS_IRQ_STA		BIT(21)
71 #define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA	BIT(20)
72 
73 #define TEGRA_GPCDMA_CHAN_CSRE			0x008
74 #define TEGRA_GPCDMA_CHAN_CSRE_PAUSE		BIT(31)
75 
76 /* Source address */
77 #define TEGRA_GPCDMA_CHAN_SRC_PTR		0x00C
78 
79 /* Destination address */
80 #define TEGRA_GPCDMA_CHAN_DST_PTR		0x010
81 
82 /* High address pointer */
83 #define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR		0x014
84 #define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR		GENMASK(7, 0)
85 #define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR		GENMASK(23, 16)
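/*
 * Illustration (matches the usage in the prep callbacks below): for a
 * 40-bit destination address, bits [39:32] are packed into this register
 * with FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, addr >> 32), while the
 * low 32 bits go into TEGRA_GPCDMA_CHAN_DST_PTR.
 */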
86 
87 /* MC sequence register */
88 #define TEGRA_GPCDMA_CHAN_MCSEQ			0x18
89 #define TEGRA_GPCDMA_MCSEQ_DATA_SWAP		BIT(31)
90 #define TEGRA_GPCDMA_MCSEQ_REQ_COUNT		GENMASK(30, 25)
91 #define TEGRA_GPCDMA_MCSEQ_BURST		GENMASK(24, 23)
92 #define TEGRA_GPCDMA_MCSEQ_BURST_2		\
93 		FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0)
94 #define TEGRA_GPCDMA_MCSEQ_BURST_16		\
95 		FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3)
96 #define TEGRA_GPCDMA_MCSEQ_WRAP1		GENMASK(22, 20)
97 #define TEGRA_GPCDMA_MCSEQ_WRAP0		GENMASK(19, 17)
98 #define TEGRA_GPCDMA_MCSEQ_WRAP_NONE		0
99 
100 #define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK	GENMASK(13, 7)
101 #define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK	GENMASK(6, 0)
102 
103 /* MMIO sequence register */
104 #define TEGRA_GPCDMA_CHAN_MMIOSEQ			0x01c
105 #define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF		BIT(31)
106 #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH		GENMASK(30, 28)
107 #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8	\
108 		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0)
109 #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16	\
110 		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1)
111 #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32	\
112 		FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2)
113 #define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP		BIT(27)
114 #define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT	23
115 #define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN		2U
116 #define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX		32U
117 #define TEGRA_GPCDMA_MMIOSEQ_BURST(bs)	\
118 		(GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT)
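/*
 * Worked example (illustrative, for a power-of-two burst size in words):
 * bs = 16 gives fls(16) = 5, so GENMASK(3, 0) = 0xF, i.e. the field is
 * programmed with bs - 1, shifted up by TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT.
 */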
119 #define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID		GENMASK(22, 19)
120 #define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD		GENMASK(18, 16)
121 #define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT		GENMASK(8, 7)
122 
123 /* Channel WCOUNT */
124 #define TEGRA_GPCDMA_CHAN_WCOUNT		0x20
125 
126 /* Transfer count */
127 #define TEGRA_GPCDMA_CHAN_XFER_COUNT		0x24
128 
129 /* DMA byte count status */
130 #define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS	0x28
131 
132 /* Error Status Register */
133 #define TEGRA_GPCDMA_CHAN_ERR_STATUS		0x30
134 #define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT	8
135 #define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK	0xF
136 #define TEGRA_GPCDMA_CHAN_ERR_TYPE(err)	(			\
137 		((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) &	\
138 		TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK)
139 #define TEGRA_DMA_BM_FIFO_FULL_ERR		0xF
140 #define TEGRA_DMA_PERIPH_FIFO_FULL_ERR		0xE
141 #define TEGRA_DMA_PERIPH_ID_ERR			0xD
142 #define TEGRA_DMA_STREAM_ID_ERR			0xC
143 #define TEGRA_DMA_MC_SLAVE_ERR			0xB
144 #define TEGRA_DMA_MMIO_SLAVE_ERR		0xA
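/*
 * Worked example (illustrative): an ERR_STATUS value of 0xB00 decodes as
 * ((0xB00 >> 8) & 0xF) = 0xB, i.e. TEGRA_DMA_MC_SLAVE_ERR.
 */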
145 
146 /* Fixed Pattern */
147 #define TEGRA_GPCDMA_CHAN_FIXED_PATTERN		0x34
148 
149 #define TEGRA_GPCDMA_CHAN_TZ			0x38
150 #define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1	BIT(0)
151 #define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1		BIT(1)
152 
153 #define TEGRA_GPCDMA_CHAN_SPARE			0x3c
154 #define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC	BIT(16)
155 
156 /*
157  * If any burst is in flight when the DMA is paused, this is the time needed
158  * for the in-flight burst to complete and the DMA status register to update.
159  */
160 #define TEGRA_GPCDMA_BURST_COMPLETE_TIME	10
161 #define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT	5000 /* 5 msec */
162 
163 /* Channel base address offset from GPCDMA base address */
164 #define TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET	0x10000
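/*
 * Worked example (illustrative): with the 64 KiB per-channel stride used by
 * all supported chips, channel 3 starts at 0x10000 + 3 * SZ_64K = 0x40000
 * from the GPCDMA base (see chan_base_offset setup in probe).
 */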
165 
166 /* Default channel mask, reserving channel 0 */
167 #define TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK	0xfffffffe
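/* e.g. the default mask 0xfffffffe makes channels 1-31 (31 of 32) available and skips channel 0 */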
168 
169 struct tegra_dma;
170 struct tegra_dma_channel;
171 
172 /*
173  * tegra_dma_chip_data: Tegra chip-specific DMA data
174  * @nr_channels: Number of channels available in the controller.
175  * @channel_reg_size: Channel register size.
176  * @max_dma_count: Maximum DMA transfer count supported by the controller.
177  * @hw_support_pause: Whether the DMA HW engine supports pausing a channel.
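 * @terminate: Chip-specific callback used to terminate a channel transfer.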
178  */
179 struct tegra_dma_chip_data {
180 	bool hw_support_pause;
181 	unsigned int nr_channels;
182 	unsigned int channel_reg_size;
183 	unsigned int max_dma_count;
184 	int (*terminate)(struct tegra_dma_channel *tdc);
185 };
186 
187 /* DMA channel registers */
188 struct tegra_dma_channel_regs {
189 	u32 csr;
190 	u32 src_ptr;
191 	u32 dst_ptr;
192 	u32 high_addr_ptr;
193 	u32 mc_seq;
194 	u32 mmio_seq;
195 	u32 wcount;
196 	u32 fixed_pattern;
197 };
198 
199 /*
200  * tegra_dma_sg_req: DMA request details to configure the hardware. This
201  * contains the details of one sub-transfer used to program the DMA hw.
202  * A client's data transfer request can be broken into multiple
203  * sub-transfers, as per the requester details and hw support; these
204  * sub-transfers are added as an array to the Tegra DMA desc that manages the transfer.
205  */
206 struct tegra_dma_sg_req {
207 	unsigned int len;
208 	struct tegra_dma_channel_regs ch_regs;
209 };
210 
211 /*
212  * tegra_dma_desc: Tegra DMA descriptor which uses virt_dma_desc to
213  * manage the client request and keep track of transfer status, callbacks,
214  * request counts, etc.
215  */
216 struct tegra_dma_desc {
217 	bool cyclic;
218 	unsigned int bytes_req;
219 	unsigned int bytes_xfer;
220 	unsigned int sg_idx;
221 	unsigned int sg_count;
222 	struct virt_dma_desc vd;
223 	struct tegra_dma_channel *tdc;
224 	struct tegra_dma_sg_req sg_req[] __counted_by(sg_count);
225 };
226 
227 /*
228  * tegra_dma_channel: Channel-specific information
229  */
230 struct tegra_dma_channel {
231 	bool config_init;
232 	char name[30];
233 	enum dma_transfer_direction sid_dir;
234 	enum dma_status status;
235 	int id;
236 	int irq;
237 	int slave_id;
238 	struct tegra_dma *tdma;
239 	struct virt_dma_chan vc;
240 	struct tegra_dma_desc *dma_desc;
241 	struct dma_slave_config dma_sconfig;
242 	unsigned int stream_id;
243 	unsigned long chan_base_offset;
244 };
245 
246 /*
247  * tegra_dma: Tegra DMA controller-specific information
248  */
249 struct tegra_dma {
250 	const struct tegra_dma_chip_data *chip_data;
251 	unsigned long sid_m2d_reserved;
252 	unsigned long sid_d2m_reserved;
253 	u32 chan_mask;
254 	void __iomem *base_addr;
255 	struct device *dev;
256 	struct dma_device dma_dev;
257 	struct reset_control *rst;
258 	struct tegra_dma_channel channels[];
259 };
260 
261 static inline void tdc_write(struct tegra_dma_channel *tdc,
262 			     u32 reg, u32 val)
263 {
264 	writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
265 }
266 
267 static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
268 {
269 	return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
270 }
271 
272 static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
273 {
274 	return container_of(dc, struct tegra_dma_channel, vc.chan);
275 }
276 
277 static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd)
278 {
279 	return container_of(vd, struct tegra_dma_desc, vd);
280 }
281 
282 static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
283 {
284 	return tdc->vc.chan.device->dev;
285 }
286 
287 static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc)
288 {
289 	dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n",
290 		tdc->id, tdc->name);
291 	dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n",
292 		tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR),
293 		tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS),
294 		tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE),
295 		tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR),
296 		tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR)
297 	);
298 	dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n",
299 		tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ),
300 		tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ),
301 		tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT),
302 		tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT),
303 		tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS)
304 	);
305 	dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n",
306 		tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS));
307 }
308 
309 static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc,
310 				 enum dma_transfer_direction direction)
311 {
312 	struct tegra_dma *tdma = tdc->tdma;
313 	int sid = tdc->slave_id;
314 
315 	if (!is_slave_direction(direction))
316 		return 0;
317 
318 	switch (direction) {
319 	case DMA_MEM_TO_DEV:
320 		if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) {
321 			dev_err(tdma->dev, "slave id already in use\n");
322 			return -EINVAL;
323 		}
324 		break;
325 	case DMA_DEV_TO_MEM:
326 		if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) {
327 			dev_err(tdma->dev, "slave id already in use\n");
328 			return -EINVAL;
329 		}
330 		break;
331 	default:
332 		break;
333 	}
334 
335 	tdc->sid_dir = direction;
336 
337 	return 0;
338 }
339 
340 static void tegra_dma_sid_free(struct tegra_dma_channel *tdc)
341 {
342 	struct tegra_dma *tdma = tdc->tdma;
343 	int sid = tdc->slave_id;
344 
345 	switch (tdc->sid_dir) {
346 	case DMA_MEM_TO_DEV:
347 		clear_bit(sid,  &tdma->sid_m2d_reserved);
348 		break;
349 	case DMA_DEV_TO_MEM:
350 		clear_bit(sid,  &tdma->sid_d2m_reserved);
351 		break;
352 	default:
353 		break;
354 	}
355 
356 	tdc->sid_dir = DMA_TRANS_NONE;
357 }
358 
359 static void tegra_dma_desc_free(struct virt_dma_desc *vd)
360 {
361 	kfree(container_of(vd, struct tegra_dma_desc, vd));
362 }
363 
364 static int tegra_dma_slave_config(struct dma_chan *dc,
365 				  struct dma_slave_config *sconfig)
366 {
367 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
368 
369 	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
370 	tdc->config_init = true;
371 
372 	return 0;
373 }
374 
375 static int tegra_dma_pause(struct tegra_dma_channel *tdc)
376 {
377 	int ret;
378 	u32 val;
379 
380 	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
381 	val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
382 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
383 
384 	/* Wait until busy bit is de-asserted */
385 	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
386 			tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
387 			val,
388 			!(val & TEGRA_GPCDMA_STATUS_BUSY),
389 			TEGRA_GPCDMA_BURST_COMPLETE_TIME,
390 			TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
391 
392 	if (ret) {
393 		dev_err(tdc2dev(tdc), "DMA pause timed out\n");
394 		tegra_dma_dump_chan_regs(tdc);
395 	}
396 
397 	tdc->status = DMA_PAUSED;
398 
399 	return ret;
400 }
401 
402 static int tegra_dma_device_pause(struct dma_chan *dc)
403 {
404 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
405 	unsigned long flags;
406 	int ret;
407 
408 	if (!tdc->tdma->chip_data->hw_support_pause)
409 		return -ENOSYS;
410 
411 	spin_lock_irqsave(&tdc->vc.lock, flags);
412 	ret = tegra_dma_pause(tdc);
413 	spin_unlock_irqrestore(&tdc->vc.lock, flags);
414 
415 	return ret;
416 }
417 
418 static void tegra_dma_resume(struct tegra_dma_channel *tdc)
419 {
420 	u32 val;
421 
422 	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
423 	val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
424 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
425 
426 	tdc->status = DMA_IN_PROGRESS;
427 }
428 
429 static int tegra_dma_device_resume(struct dma_chan *dc)
430 {
431 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
432 	unsigned long flags;
433 
434 	if (!tdc->tdma->chip_data->hw_support_pause)
435 		return -ENOSYS;
436 
437 	spin_lock_irqsave(&tdc->vc.lock, flags);
438 	tegra_dma_resume(tdc);
439 	spin_unlock_irqrestore(&tdc->vc.lock, flags);
440 
441 	return 0;
442 }
443 
444 static inline int tegra_dma_pause_noerr(struct tegra_dma_channel *tdc)
445 {
446 	/* Return 0 irrespective of PAUSE status.
447 	 * This is useful to recover channels that can exit out of flush
448 	 * state when the channel is disabled.
449 	 */
450 
451 	tegra_dma_pause(tdc);
452 	return 0;
453 }
454 
455 static void tegra_dma_disable(struct tegra_dma_channel *tdc)
456 {
457 	u32 csr, status;
458 
459 	csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
460 
461 	/* Disable interrupts */
462 	csr &= ~TEGRA_GPCDMA_CSR_IE_EOC;
463 
464 	/* Disable DMA */
465 	csr &= ~TEGRA_GPCDMA_CSR_ENB;
466 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);
467 
468 	/* Clear interrupt status if it is there */
469 	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
470 	if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) {
471 		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
472 		tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status);
473 	}
474 }
475 
476 static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc)
477 {
478 	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
479 	struct tegra_dma_channel_regs *ch_regs;
480 	int ret;
481 	u32 val;
482 
483 	dma_desc->sg_idx++;
484 
485 	/* Reset the sg index for cyclic transfers */
486 	if (dma_desc->sg_idx == dma_desc->sg_count)
487 		dma_desc->sg_idx = 0;
488 
489 	/* Wait until the DMA is busy, then configure the next transfer */
490 	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
491 			tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS,
492 			val,
493 			(val & TEGRA_GPCDMA_STATUS_BUSY), 0,
494 			TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
495 	if (ret)
496 		return;
497 
498 	ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;
499 
500 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
501 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
502 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
503 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
504 
505 	/* Start DMA */
506 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
507 		  ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
508 }
509 
510 static void tegra_dma_start(struct tegra_dma_channel *tdc)
511 {
512 	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
513 	struct tegra_dma_channel_regs *ch_regs;
514 	struct virt_dma_desc *vdesc;
515 
516 	if (!dma_desc) {
517 		vdesc = vchan_next_desc(&tdc->vc);
518 		if (!vdesc)
519 			return;
520 
521 		dma_desc = vd_to_tegra_dma_desc(vdesc);
522 		list_del(&vdesc->node);
523 		dma_desc->tdc = tdc;
524 		tdc->dma_desc = dma_desc;
525 
526 		tegra_dma_resume(tdc);
527 	}
528 
529 	ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs;
530 
531 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount);
532 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0);
533 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr);
534 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr);
535 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr);
536 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern);
537 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq);
538 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq);
539 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr);
540 
541 	/* Start DMA */
542 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR,
543 		  ch_regs->csr | TEGRA_GPCDMA_CSR_ENB);
544 }
545 
546 static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
547 {
548 	vchan_cookie_complete(&tdc->dma_desc->vd);
549 
550 	tegra_dma_sid_free(tdc);
551 	tdc->dma_desc = NULL;
552 	tdc->status = DMA_COMPLETE;
553 }
554 
555 static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
556 					unsigned int err_status)
557 {
558 	switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) {
559 	case TEGRA_DMA_BM_FIFO_FULL_ERR:
560 		dev_err(tdc->tdma->dev,
561 			"GPCDMA CH%d bm fifo full\n", tdc->id);
562 		break;
563 
564 	case TEGRA_DMA_PERIPH_FIFO_FULL_ERR:
565 		dev_err(tdc->tdma->dev,
566 			"GPCDMA CH%d peripheral fifo full\n", tdc->id);
567 		break;
568 
569 	case TEGRA_DMA_PERIPH_ID_ERR:
570 		dev_err(tdc->tdma->dev,
571 			"GPCDMA CH%d illegal peripheral id\n", tdc->id);
572 		break;
573 
574 	case TEGRA_DMA_STREAM_ID_ERR:
575 		dev_err(tdc->tdma->dev,
576 			"GPCDMA CH%d illegal stream id\n", tdc->id);
577 		break;
578 
579 	case TEGRA_DMA_MC_SLAVE_ERR:
580 		dev_err(tdc->tdma->dev,
581 			"GPCDMA CH%d mc slave error\n", tdc->id);
582 		break;
583 
584 	case TEGRA_DMA_MMIO_SLAVE_ERR:
585 		dev_err(tdc->tdma->dev,
586 			"GPCDMA CH%d mmio slave error\n", tdc->id);
587 		break;
588 
589 	default:
590 		dev_err(tdc->tdma->dev,
591 			"GPCDMA CH%d security violation %x\n", tdc->id,
592 			err_status);
593 	}
594 }
595 
596 static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
597 {
598 	struct tegra_dma_channel *tdc = dev_id;
599 	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
600 	struct tegra_dma_sg_req *sg_req;
601 	u32 status;
602 
603 	/* Check channel error status register */
604 	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS);
605 	if (status) {
606 		tegra_dma_chan_decode_error(tdc, status);
607 		tegra_dma_dump_chan_regs(tdc);
608 		tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF);
609 	}
610 
611 	spin_lock(&tdc->vc.lock);
612 	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
613 	if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC))
614 		goto irq_done;
615 
616 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS,
617 		  TEGRA_GPCDMA_STATUS_ISE_EOC);
618 
619 	if (!dma_desc)
620 		goto irq_done;
621 
622 	sg_req = dma_desc->sg_req;
623 	dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len;
624 
625 	if (dma_desc->cyclic) {
626 		vchan_cyclic_callback(&dma_desc->vd);
627 		tegra_dma_configure_next_sg(tdc);
628 	} else {
629 		dma_desc->sg_idx++;
630 		if (dma_desc->sg_idx == dma_desc->sg_count)
631 			tegra_dma_xfer_complete(tdc);
632 		else
633 			tegra_dma_start(tdc);
634 	}
635 
636 irq_done:
637 	spin_unlock(&tdc->vc.lock);
638 	return IRQ_HANDLED;
639 }
640 
641 static void tegra_dma_issue_pending(struct dma_chan *dc)
642 {
643 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
644 	unsigned long flags;
645 
646 	if (tdc->dma_desc)
647 		return;
648 
649 	spin_lock_irqsave(&tdc->vc.lock, flags);
650 	if (vchan_issue_pending(&tdc->vc))
651 		tegra_dma_start(tdc);
652 
653 	/*
654 	 * For cyclic DMA transfers, program the second
655 	 * transfer parameters as soon as the first DMA
656 	 * transfer is started in order for the DMA
657 	 * controller to trigger the second transfer
658 	 * with the correct parameters.
659 	 */
660 	if (tdc->dma_desc && tdc->dma_desc->cyclic)
661 		tegra_dma_configure_next_sg(tdc);
662 
663 	spin_unlock_irqrestore(&tdc->vc.lock, flags);
664 }
665 
666 static int tegra_dma_stop_client(struct tegra_dma_channel *tdc)
667 {
668 	int ret;
669 	u32 status, csr;
670 
671 	/*
672 	 * Change the client associated with the DMA channel
673 	 * to stop the DMA engine from starting any more bursts for
674 	 * the given client, and wait for in-flight bursts to complete
675 	 */
676 	csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR);
677 	csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK);
678 	csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED;
679 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr);
680 
681 	/* Wait for the in-flight data transfer to finish */
682 	udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME);
683 
684 	/* If the TX/RX path is still active, wait until it becomes
685 	 * inactive
686 	 */
687 
688 	ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr +
689 				tdc->chan_base_offset +
690 				TEGRA_GPCDMA_CHAN_STATUS,
691 				status,
692 				!(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX |
693 				TEGRA_GPCDMA_STATUS_CHANNEL_RX)),
694 				5,
695 				TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT);
696 	if (ret) {
697 		dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n");
698 		tegra_dma_dump_chan_regs(tdc);
699 	}
700 
701 	return ret;
702 }
703 
704 static int tegra_dma_terminate_all(struct dma_chan *dc)
705 {
706 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
707 	unsigned long flags;
708 	LIST_HEAD(head);
709 	int err;
710 
711 	spin_lock_irqsave(&tdc->vc.lock, flags);
712 
713 	if (tdc->dma_desc) {
714 		err = tdc->tdma->chip_data->terminate(tdc);
715 		if (err) {
716 			spin_unlock_irqrestore(&tdc->vc.lock, flags);
717 			return err;
718 		}
719 
720 		vchan_terminate_vdesc(&tdc->dma_desc->vd);
721 		tegra_dma_disable(tdc);
722 		tdc->dma_desc = NULL;
723 	}
724 
725 	tdc->status = DMA_COMPLETE;
726 	tegra_dma_sid_free(tdc);
727 	vchan_get_all_descriptors(&tdc->vc, &head);
728 	spin_unlock_irqrestore(&tdc->vc.lock, flags);
729 
730 	vchan_dma_desc_free_list(&tdc->vc, &head);
731 
732 	return 0;
733 }
734 
735 static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
736 {
737 	struct tegra_dma_desc *dma_desc = tdc->dma_desc;
738 	struct tegra_dma_sg_req *sg_req = dma_desc->sg_req;
739 	unsigned int bytes_xfer, residual;
740 	u32 wcount = 0, status;
741 
742 	wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT);
743 
744 	/*
745 	 * Set wcount = 0 if EOC bit is set. The transfer would have
746 	 * already completed and CHAN_XFER_COUNT may already have been updated
747 	 * for the next transfer, specifically in case of cyclic transfers.
748 	 */
749 	status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS);
750 	if (status & TEGRA_GPCDMA_STATUS_ISE_EOC)
751 		wcount = 0;
752 
753 	bytes_xfer = dma_desc->bytes_xfer +
754 		     sg_req[dma_desc->sg_idx].len - (wcount * 4);
755 
756 	if (dma_desc->bytes_req == bytes_xfer)
757 		return 0;
758 
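	/*
	 * Worked example (illustrative): for a cyclic transfer with
	 * bytes_req = 1024 and bytes_xfer = 1536, the position within the
	 * current period is 1536 % 1024 = 512, so residual = 1024 - 512 =
	 * 512 bytes.
	 */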
759 	residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
760 
761 	return residual;
762 }
763 
764 static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
765 					   dma_cookie_t cookie,
766 					   struct dma_tx_state *txstate)
767 {
768 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
769 	struct tegra_dma_desc *dma_desc;
770 	struct virt_dma_desc *vd;
771 	unsigned int residual;
772 	unsigned long flags;
773 	enum dma_status ret;
774 
775 	ret = dma_cookie_status(dc, cookie, txstate);
776 	if (ret == DMA_COMPLETE)
777 		return ret;
778 
779 	if (tdc->status == DMA_PAUSED)
780 		ret = DMA_PAUSED;
781 
782 	spin_lock_irqsave(&tdc->vc.lock, flags);
783 	vd = vchan_find_desc(&tdc->vc, cookie);
784 	if (vd) {
785 		dma_desc = vd_to_tegra_dma_desc(vd);
786 		residual = dma_desc->bytes_req;
787 		dma_set_residue(txstate, residual);
788 	} else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) {
789 		residual =  tegra_dma_get_residual(tdc);
790 		dma_set_residue(txstate, residual);
791 	} else {
792 		dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie);
793 	}
794 	spin_unlock_irqrestore(&tdc->vc.lock, flags);
795 
796 	return ret;
797 }
798 
799 static inline int get_bus_width(struct tegra_dma_channel *tdc,
800 				enum dma_slave_buswidth slave_bw)
801 {
802 	switch (slave_bw) {
803 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
804 		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8;
805 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
806 		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16;
807 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
808 		return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32;
809 	default:
810 		dev_err(tdc2dev(tdc), "given slave bus width is not supported\n");
811 		return -EINVAL;
812 	}
813 }
814 
815 static unsigned int get_burst_size(struct tegra_dma_channel *tdc,
816 				   u32 burst_size, enum dma_slave_buswidth slave_bw,
817 				   int len)
818 {
819 	unsigned int burst_mmio_width, burst_byte;
820 
821 	/*
822 	 * The burst_size from the client is in units of the bus_width;
823 	 * convert that into words.
824 	 * If the client does not specify a burst_size, then use
825 	 * len to calculate the optimum burst size.
826 	 */
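	/*
	 * Worked example (illustrative): a client burst of 8 words on a
	 * 4-byte bus gives burst_byte = 32, i.e. 8 MMIO words; widths below
	 * 2 words return 0, and anything above 32 words is capped at 32.
	 */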
827 	burst_byte = burst_size ? burst_size * slave_bw : len;
828 	burst_mmio_width = burst_byte / 4;
829 
830 	if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN)
831 		return 0;
832 
833 	burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX);
834 
835 	return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width);
836 }
837 
838 static int get_transfer_param(struct tegra_dma_channel *tdc,
839 			      enum dma_transfer_direction direction,
840 			      u32 *apb_addr,
841 			      u32 *mmio_seq,
842 			      u32 *csr,
843 			      unsigned int *burst_size,
844 			      enum dma_slave_buswidth *slave_bw)
845 {
846 	switch (direction) {
847 	case DMA_MEM_TO_DEV:
848 		*apb_addr = tdc->dma_sconfig.dst_addr;
849 		*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
850 		*burst_size = tdc->dma_sconfig.dst_maxburst;
851 		*slave_bw = tdc->dma_sconfig.dst_addr_width;
852 		*csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC;
853 		return 0;
854 	case DMA_DEV_TO_MEM:
855 		*apb_addr = tdc->dma_sconfig.src_addr;
856 		*mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
857 		*burst_size = tdc->dma_sconfig.src_maxburst;
858 		*slave_bw = tdc->dma_sconfig.src_addr_width;
859 		*csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC;
860 		return 0;
861 	default:
862 		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
863 	}
864 
865 	return -EINVAL;
866 }
867 
868 static struct dma_async_tx_descriptor *
869 tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value,
870 			  size_t len, unsigned long flags)
871 {
872 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
873 	unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
874 	struct tegra_dma_sg_req *sg_req;
875 	struct tegra_dma_desc *dma_desc;
876 	u32 csr, mc_seq;
877 
878 	if ((len & 3) || (dest & 3) || len > max_dma_count) {
879 		dev_err(tdc2dev(tdc),
880 			"DMA length/memory address is not supported\n");
881 		return NULL;
882 	}
883 
884 	/* Set DMA mode to fixed pattern */
885 	csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT;
886 	/* Select once (one-shot) mode */
887 	csr |= TEGRA_GPCDMA_CSR_ONCE;
888 	/* Enable IRQ mask */
889 	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
890 	/* Enable the DMA interrupt */
891 	if (flags & DMA_PREP_INTERRUPT)
892 		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
893 	/* Configure default priority weight for the channel */
894 	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
895 
896 	mc_seq =  tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
897 	/* Retain the stream-id and clear the rest */
898 	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
899 
900 	/* Set the address wrapping */
901 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
902 						TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
903 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
904 						TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
905 
906 	/* Program outstanding MC requests */
907 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
908 	/* Set burst size */
909 	mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
910 
911 	dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
912 	if (!dma_desc)
913 		return NULL;
914 
915 	dma_desc->bytes_req = len;
916 	dma_desc->sg_count = 1;
917 	sg_req = dma_desc->sg_req;
918 
919 	sg_req[0].ch_regs.src_ptr = 0;
920 	sg_req[0].ch_regs.dst_ptr = dest;
921 	sg_req[0].ch_regs.high_addr_ptr =
922 			FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
923 	sg_req[0].ch_regs.fixed_pattern = value;
924 	/* The word count reg takes the value as (N + 1) words */
925 	sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
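	/* e.g. len = 64 bytes gives wcount = (64 - 4) >> 2 = 15, read by the HW as 16 words */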
926 	sg_req[0].ch_regs.csr = csr;
927 	sg_req[0].ch_regs.mmio_seq = 0;
928 	sg_req[0].ch_regs.mc_seq = mc_seq;
929 	sg_req[0].len = len;
930 
931 	dma_desc->cyclic = false;
932 	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
933 }
934 
935 static struct dma_async_tx_descriptor *
936 tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest,
937 			  dma_addr_t src, size_t len, unsigned long flags)
938 {
939 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
940 	struct tegra_dma_sg_req *sg_req;
941 	struct tegra_dma_desc *dma_desc;
942 	unsigned int max_dma_count;
943 	u32 csr, mc_seq;
944 
945 	max_dma_count = tdc->tdma->chip_data->max_dma_count;
946 	if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) {
947 		dev_err(tdc2dev(tdc),
948 			"DMA length/memory address is not supported\n");
949 		return NULL;
950 	}
951 
952 	/* Set DMA mode to memory to memory transfer */
953 	csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM;
954 	/* Select once (one-shot) mode */
955 	csr |= TEGRA_GPCDMA_CSR_ONCE;
956 	/* Enable IRQ mask */
957 	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
958 	/* Enable the DMA interrupt */
959 	if (flags & DMA_PREP_INTERRUPT)
960 		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
961 	/* Configure default priority weight for the channel */
962 	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
963 
964 	mc_seq =  tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
965 	/* Retain the stream-id and clear the rest */
966 	mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) |
967 		  (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);
968 
969 	/* Set the address wrapping */
970 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
971 			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
972 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
973 			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
974 
975 	/* Program outstanding MC requests */
976 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
977 	/* Set burst size */
978 	mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
979 
980 	dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT);
981 	if (!dma_desc)
982 		return NULL;
983 
984 	dma_desc->bytes_req = len;
985 	dma_desc->sg_count = 1;
986 	sg_req = dma_desc->sg_req;
987 
988 	sg_req[0].ch_regs.src_ptr = src;
989 	sg_req[0].ch_regs.dst_ptr = dest;
990 	sg_req[0].ch_regs.high_addr_ptr =
991 		FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32));
992 	sg_req[0].ch_regs.high_addr_ptr |=
993 		FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32));
994 	/* The word count reg takes the value as (N + 1) words */
995 	sg_req[0].ch_regs.wcount = ((len - 4) >> 2);
996 	sg_req[0].ch_regs.csr = csr;
997 	sg_req[0].ch_regs.mmio_seq = 0;
998 	sg_req[0].ch_regs.mc_seq = mc_seq;
999 	sg_req[0].len = len;
1000 
1001 	dma_desc->cyclic = false;
1002 	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
1003 }
1004 
1005 static struct dma_async_tx_descriptor *
1006 tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl,
1007 			unsigned int sg_len, enum dma_transfer_direction direction,
1008 			unsigned long flags, void *context)
1009 {
1010 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1011 	unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count;
1012 	enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
1013 	u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0;
1014 	struct tegra_dma_sg_req *sg_req;
1015 	struct tegra_dma_desc *dma_desc;
1016 	struct scatterlist *sg;
1017 	u32 burst_size;
1018 	unsigned int i;
1019 	int ret;
1020 
1021 	if (!tdc->config_init) {
1022 		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
1023 		return NULL;
1024 	}
1025 	if (sg_len < 1) {
1026 		dev_err(tdc2dev(tdc), "Invalid segment length %u\n", sg_len);
1027 		return NULL;
1028 	}
1029 
1030 	ret = tegra_dma_sid_reserve(tdc, direction);
1031 	if (ret)
1032 		return NULL;
1033 
1034 	ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
1035 				 &burst_size, &slave_bw);
1036 	if (ret < 0)
1037 		return NULL;
1038 
1039 	/* Enable once or continuous mode */
1040 	csr |= TEGRA_GPCDMA_CSR_ONCE;
1041 	/* Program the slave id in requestor select */
1042 	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
1043 	/* Enable IRQ mask */
1044 	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
1045 	/* Configure default priority weight for the channel */
1046 	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
1047 
1048 	/* Enable the DMA interrupt */
1049 	if (flags & DMA_PREP_INTERRUPT)
1050 		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
1051 
1052 	mc_seq =  tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
1053 	/* Retain the stream-id and clear the rest */
1054 	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
1055 
1056 	/* Set the address wrapping on both MC and MMIO side */
1057 
1058 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
1059 			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
1060 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
1061 			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
1062 	mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);
1063 
1064 	/* Program 2 MC outstanding requests by default. */
1065 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
1066 
1067 	/* Set the MC burst size depending on the MMIO burst size */
1068 	if (burst_size == 64)
1069 		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
1070 	else
1071 		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;
1072 
1073 	dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT);
1074 	if (!dma_desc)
1075 		return NULL;
1076 
1077 	dma_desc->sg_count = sg_len;
1078 	sg_req = dma_desc->sg_req;
1079 
1080 	/* Make transfer requests */
1081 	for_each_sg(sgl, sg, sg_len, i) {
1082 		u32 len;
1083 		dma_addr_t mem;
1084 
1085 		mem = sg_dma_address(sg);
1086 		len = sg_dma_len(sg);
1087 
1088 		if ((len & 3) || (mem & 3) || len > max_dma_count) {
1089 			dev_err(tdc2dev(tdc),
1090 				"DMA length/memory address is not supported\n");
1091 			kfree(dma_desc);
1092 			return NULL;
1093 		}
1094 
1095 		mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1096 		dma_desc->bytes_req += len;
1097 
1098 		if (direction == DMA_MEM_TO_DEV) {
1099 			sg_req[i].ch_regs.src_ptr = mem;
1100 			sg_req[i].ch_regs.dst_ptr = apb_ptr;
1101 			sg_req[i].ch_regs.high_addr_ptr =
1102 				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
1103 		} else if (direction == DMA_DEV_TO_MEM) {
1104 			sg_req[i].ch_regs.src_ptr = apb_ptr;
1105 			sg_req[i].ch_regs.dst_ptr = mem;
1106 			sg_req[i].ch_regs.high_addr_ptr =
1107 				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
1108 		}
1109 
1110 		/*
1111 		 * Word count register takes input in words. Writing a value
1112 		 * of N into the word count register means a request of (N + 1) words.
1113 		 */
1114 		sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
1115 		sg_req[i].ch_regs.csr = csr;
1116 		sg_req[i].ch_regs.mmio_seq = mmio_seq;
1117 		sg_req[i].ch_regs.mc_seq = mc_seq;
1118 		sg_req[i].len = len;
1119 	}
1120 
1121 	dma_desc->cyclic = false;
1122 	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
1123 }
1124 
1125 static struct dma_async_tx_descriptor *
1126 tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
1127 			  size_t period_len, enum dma_transfer_direction direction,
1128 			  unsigned long flags)
1129 {
1130 	enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED;
1131 	u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size;
1132 	unsigned int max_dma_count, len, period_count, i;
1133 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1134 	struct tegra_dma_desc *dma_desc;
1135 	struct tegra_dma_sg_req *sg_req;
1136 	dma_addr_t mem = buf_addr;
1137 	int ret;
1138 
1139 	if (!buf_len || !period_len) {
1140 		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
1141 		return NULL;
1142 	}
1143 
1144 	if (!tdc->config_init) {
1145 		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
1146 		return NULL;
1147 	}
1148 
1149 	ret = tegra_dma_sid_reserve(tdc, direction);
1150 	if (ret)
1151 		return NULL;
1152 
1153 	/*
1154 	 * We only support cyclic transfers when buf_len is a multiple of
1155 	 * period_len.
1156 	 */
1157 	if (buf_len % period_len) {
1158 		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
1159 		return NULL;
1160 	}
1161 
1162 	len = period_len;
1163 	max_dma_count = tdc->tdma->chip_data->max_dma_count;
1164 	if ((len & 3) || (buf_addr & 3) || len > max_dma_count) {
1165 		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
1166 		return NULL;
1167 	}
1168 
1169 	ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr,
1170 				 &burst_size, &slave_bw);
1171 	if (ret < 0)
1172 		return NULL;
1173 
1174 	/* Select continuous mode for the cyclic transfer */
1175 	csr &= ~TEGRA_GPCDMA_CSR_ONCE;
1176 	/* Program the slave id in requestor select */
1177 	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id);
1178 	/* Enable IRQ mask */
1179 	csr |= TEGRA_GPCDMA_CSR_IRQ_MASK;
1180 	/* Configure default priority weight for the channel*/
1181 	csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1);
1182 
1183 	/* Enable the DMA interrupt */
1184 	if (flags & DMA_PREP_INTERRUPT)
1185 		csr |= TEGRA_GPCDMA_CSR_IE_EOC;
1186 
1187 	mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1);
1188 
1189 	mc_seq =  tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
1190 	/* Retain the stream-id and clear the rest */
1191 	mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK;
1192 
1193 	/* Set the address wrapping on both MC and MMIO side */
1194 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0,
1195 			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
1196 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1,
1197 			     TEGRA_GPCDMA_MCSEQ_WRAP_NONE);
1198 
1199 	/* Program 2 MC outstanding requests by default. */
1200 	mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1);
1201 	/* Set the MC burst size depending on the MMIO burst size */
1202 	if (burst_size == 64)
1203 		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16;
1204 	else
1205 		mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2;
1206 
1207 	period_count = buf_len / period_len;
1208 	dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count),
1209 			   GFP_NOWAIT);
1210 	if (!dma_desc)
1211 		return NULL;
1212 
1213 	dma_desc->bytes_req = buf_len;
1214 	dma_desc->sg_count = period_count;
1215 	sg_req = dma_desc->sg_req;
1216 
1217 	/* Split the transfer into chunks equal to the period size */
1218 	for (i = 0; i < period_count; i++) {
1219 		mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1220 		if (direction == DMA_MEM_TO_DEV) {
1221 			sg_req[i].ch_regs.src_ptr = mem;
1222 			sg_req[i].ch_regs.dst_ptr = apb_ptr;
1223 			sg_req[i].ch_regs.high_addr_ptr =
1224 				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32));
1225 		} else if (direction == DMA_DEV_TO_MEM) {
1226 			sg_req[i].ch_regs.src_ptr = apb_ptr;
1227 			sg_req[i].ch_regs.dst_ptr = mem;
1228 			sg_req[i].ch_regs.high_addr_ptr =
1229 				FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32));
1230 		}
1231 		/*
1232 		 * Word count register takes input in words. Writing a value
1233 		 * of N into the word count register means a request of (N + 1) words.
1234 		 */
1235 		sg_req[i].ch_regs.wcount = ((len - 4) >> 2);
1236 		sg_req[i].ch_regs.csr = csr;
1237 		sg_req[i].ch_regs.mmio_seq = mmio_seq;
1238 		sg_req[i].ch_regs.mc_seq = mc_seq;
1239 		sg_req[i].len = len;
1240 
1241 		mem += len;
1242 	}
1243 
1244 	dma_desc->cyclic = true;
1245 
1246 	return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags);
1247 }
1248 
1249 static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
1250 {
1251 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1252 	int ret;
1253 
1254 	ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
1255 	if (ret) {
1256 		dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name);
1257 		return ret;
1258 	}
1259 
1260 	dma_cookie_init(&tdc->vc.chan);
1261 	tdc->config_init = false;
1262 	return 0;
1263 }
1264 
1265 static void tegra_dma_chan_synchronize(struct dma_chan *dc)
1266 {
1267 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1268 
1269 	synchronize_irq(tdc->irq);
1270 	vchan_synchronize(&tdc->vc);
1271 }
1272 
1273 static void tegra_dma_free_chan_resources(struct dma_chan *dc)
1274 {
1275 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1276 
1277 	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
1278 
1279 	tegra_dma_terminate_all(dc);
1280 	synchronize_irq(tdc->irq);
1281 
1282 	tasklet_kill(&tdc->vc.task);
1283 	tdc->config_init = false;
1284 	tdc->slave_id = -1;
1285 	tdc->sid_dir = DMA_TRANS_NONE;
1286 	free_irq(tdc->irq, tdc);
1287 
1288 	vchan_free_chan_resources(&tdc->vc);
1289 }
1290 
1291 static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
1292 					   struct of_dma *ofdma)
1293 {
1294 	struct tegra_dma *tdma = ofdma->of_dma_data;
1295 	struct tegra_dma_channel *tdc;
1296 	struct dma_chan *chan;
1297 
1298 	chan = dma_get_any_slave_channel(&tdma->dma_dev);
1299 	if (!chan)
1300 		return NULL;
1301 
1302 	tdc = to_tegra_dma_chan(chan);
1303 	tdc->slave_id = dma_spec->args[0];
1304 
1305 	return chan;
1306 }
1307 
1308 static const struct tegra_dma_chip_data tegra186_dma_chip_data = {
1309 	.nr_channels = 32,
1310 	.channel_reg_size = SZ_64K,
1311 	.max_dma_count = SZ_1G,
1312 	.hw_support_pause = false,
1313 	.terminate = tegra_dma_stop_client,
1314 };
1315 
1316 static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
1317 	.nr_channels = 32,
1318 	.channel_reg_size = SZ_64K,
1319 	.max_dma_count = SZ_1G,
1320 	.hw_support_pause = true,
1321 	.terminate = tegra_dma_pause,
1322 };
1323 
1324 static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
1325 	.nr_channels = 32,
1326 	.channel_reg_size = SZ_64K,
1327 	.max_dma_count = SZ_1G,
1328 	.hw_support_pause = true,
1329 	.terminate = tegra_dma_pause_noerr,
1330 };
1331 
1332 static const struct of_device_id tegra_dma_of_match[] = {
1333 	{
1334 		.compatible = "nvidia,tegra186-gpcdma",
1335 		.data = &tegra186_dma_chip_data,
1336 	}, {
1337 		.compatible = "nvidia,tegra194-gpcdma",
1338 		.data = &tegra194_dma_chip_data,
1339 	}, {
1340 		.compatible = "nvidia,tegra234-gpcdma",
1341 		.data = &tegra234_dma_chip_data,
1342 	}, {
1343 	},
1344 };
1345 MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
1346 
1347 static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
1348 {
1349 	unsigned int reg_val =  tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ);
1350 
1351 	reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK);
1352 	reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK);
1353 
1354 	reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id);
1355 	reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id);
1356 
1357 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val);
1358 	return 0;
1359 }
1360 
1361 static int tegra_dma_probe(struct platform_device *pdev)
1362 {
1363 	const struct tegra_dma_chip_data *cdata = NULL;
1364 	unsigned int i;
1365 	u32 stream_id;
1366 	struct tegra_dma *tdma;
1367 	int ret;
1368 
1369 	cdata = of_device_get_match_data(&pdev->dev);
1370 
1371 	tdma = devm_kzalloc(&pdev->dev,
1372 			    struct_size(tdma, channels, cdata->nr_channels),
1373 			    GFP_KERNEL);
1374 	if (!tdma)
1375 		return -ENOMEM;
1376 
1377 	tdma->dev = &pdev->dev;
1378 	tdma->chip_data = cdata;
1379 	platform_set_drvdata(pdev, tdma);
1380 
1381 	tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
1382 	if (IS_ERR(tdma->base_addr))
1383 		return PTR_ERR(tdma->base_addr);
1384 
1385 	tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma");
1386 	if (IS_ERR(tdma->rst)) {
1387 		return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst),
1388 			      "Missing controller reset\n");
1389 	}
1390 	reset_control_reset(tdma->rst);
1391 
1392 	tdma->dma_dev.dev = &pdev->dev;
1393 
1394 	if (!tegra_dev_iommu_get_stream_id(&pdev->dev, &stream_id)) {
1395 		dev_err(&pdev->dev, "Missing iommu stream-id\n");
1396 		return -EINVAL;
1397 	}
1398 
1399 	ret = device_property_read_u32(&pdev->dev, "dma-channel-mask",
1400 				       &tdma->chan_mask);
1401 	if (ret) {
1402 		dev_warn(&pdev->dev,
1403 			 "Missing dma-channel-mask property, using default channel mask %#x\n",
1404 			 TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK);
1405 		tdma->chan_mask = TEGRA_GPCDMA_DEFAULT_CHANNEL_MASK;
1406 	}
1407 
1408 	INIT_LIST_HEAD(&tdma->dma_dev.channels);
1409 	for (i = 0; i < cdata->nr_channels; i++) {
1410 		struct tegra_dma_channel *tdc = &tdma->channels[i];
1411 
1412 		/* Check for channel mask */
1413 		if (!(tdma->chan_mask & BIT(i)))
1414 			continue;
1415 
1416 		tdc->irq = platform_get_irq(pdev, i);
1417 		if (tdc->irq < 0)
1418 			return tdc->irq;
1419 
1420 		tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADDR_OFFSET +
1421 					i * cdata->channel_reg_size;
1422 		snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i);
1423 		tdc->tdma = tdma;
1424 		tdc->id = i;
1425 		tdc->slave_id = -1;
1426 
1427 		vchan_init(&tdc->vc, &tdma->dma_dev);
1428 		tdc->vc.desc_free = tegra_dma_desc_free;
1429 
1430 		/* Program the stream-id for this channel */
1431 		tegra_dma_program_sid(tdc, stream_id);
1432 		tdc->stream_id = stream_id;
1433 	}
1434 
1435 	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
1436 	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
1437 	dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask);
1438 	dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask);
1439 	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
1440 
1441 	/*
1442 	 * Only word-aligned transfers are supported. Set the copy
1443 	 * alignment shift.
1444 	 */
1445 	tdma->dma_dev.copy_align = 2;
1446 	tdma->dma_dev.fill_align = 2;
1447 	tdma->dma_dev.device_alloc_chan_resources =
1448 					tegra_dma_alloc_chan_resources;
1449 	tdma->dma_dev.device_free_chan_resources =
1450 					tegra_dma_free_chan_resources;
1451 	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
1452 	tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy;
1453 	tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset;
1454 	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
1455 	tdma->dma_dev.device_config = tegra_dma_slave_config;
1456 	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
1457 	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
1458 	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
1459 	tdma->dma_dev.device_pause = tegra_dma_device_pause;
1460 	tdma->dma_dev.device_resume = tegra_dma_device_resume;
1461 	tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize;
1462 	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1463 
1464 	ret = dma_async_device_register(&tdma->dma_dev);
1465 	if (ret < 0) {
1466 		dev_err_probe(&pdev->dev, ret,
1467 			      "GPC DMA driver registration failed\n");
1468 		return ret;
1469 	}
1470 
1471 	ret = of_dma_controller_register(pdev->dev.of_node,
1472 					 tegra_dma_of_xlate, tdma);
1473 	if (ret < 0) {
1474 		dev_err_probe(&pdev->dev, ret,
1475 			      "GPC DMA OF registration failed\n");
1476 
1477 		dma_async_device_unregister(&tdma->dma_dev);
1478 		return ret;
1479 	}
1480 
1481 	dev_info(&pdev->dev, "GPC DMA driver registered %lu channels\n",
1482 		 hweight_long(tdma->chan_mask));
1483 
1484 	return 0;
1485 }
1486 
1487 static void tegra_dma_remove(struct platform_device *pdev)
1488 {
1489 	struct tegra_dma *tdma = platform_get_drvdata(pdev);
1490 
1491 	of_dma_controller_free(pdev->dev.of_node);
1492 	dma_async_device_unregister(&tdma->dma_dev);
1493 }
1494 
1495 static int __maybe_unused tegra_dma_pm_suspend(struct device *dev)
1496 {
1497 	struct tegra_dma *tdma = dev_get_drvdata(dev);
1498 	unsigned int i;
1499 
1500 	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
1501 		struct tegra_dma_channel *tdc = &tdma->channels[i];
1502 
1503 		if (!(tdma->chan_mask & BIT(i)))
1504 			continue;
1505 
1506 		if (tdc->dma_desc) {
1507 			dev_err(tdma->dev, "channel %u busy\n", i);
1508 			return -EBUSY;
1509 		}
1510 	}
1511 
1512 	return 0;
1513 }
1514 
1515 static int __maybe_unused tegra_dma_pm_resume(struct device *dev)
1516 {
1517 	struct tegra_dma *tdma = dev_get_drvdata(dev);
1518 	unsigned int i;
1519 
1520 	reset_control_reset(tdma->rst);
1521 
1522 	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
1523 		struct tegra_dma_channel *tdc = &tdma->channels[i];
1524 
1525 		if (!(tdma->chan_mask & BIT(i)))
1526 			continue;
1527 
1528 		tegra_dma_program_sid(tdc, tdc->stream_id);
1529 	}
1530 
1531 	return 0;
1532 }
1533 
1534 static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
1535 	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
1536 };
1537 
1538 static struct platform_driver tegra_dma_driver = {
1539 	.driver = {
1540 		.name	= "tegra-gpcdma",
1541 		.pm	= &tegra_dma_dev_pm_ops,
1542 		.of_match_table = tegra_dma_of_match,
1543 	},
1544 	.probe		= tegra_dma_probe,
1545 	.remove		= tegra_dma_remove,
1546 };
1547 
1548 module_platform_driver(tegra_dma_driver);
1549 
1550 MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver");
1551 MODULE_AUTHOR("Pavan Kunapuli <pkunapuli@nvidia.com>");
1552 MODULE_AUTHOR("Rajesh Gumasta <rgumasta@nvidia.com>");
1553 MODULE_LICENSE("GPL");
1554
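/*
 * Usage sketch (illustrative only, not part of this driver): a client
 * reaches this controller through the generic dmaengine API. The channel
 * name "tx" and the dev/fifo_phys_addr/buf_dma/len/xfer_done identifiers
 * below are assumptions made for the example.
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(dev, "tx");
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = xfer_done;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */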