core.c (Synopsys DesignWare DMA core driver): af9cc93c0dee5fc1f9fa32cd9d79a456738a21be (old) vs. 2e65060e803e046fc9b5ed0107494a452424845e (new)
1/*
2 * Core driver for the Synopsys DesignWare DMA Controller
3 *
4 * Copyright (C) 2007-2008 Atmel Corporation
5 * Copyright (C) 2010-2011 ST Microelectronics
6 * Copyright (C) 2013 Intel Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify

--- 31 unchanged lines hidden ---

40#define DWC_DEFAULT_CTLLO(_chan) ({ \
41 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
42 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
43 bool _is_slave = is_slave_direction(_dwc->direction); \
44 u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
45 DW_DMA_MSIZE_16; \
46 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
47 DW_DMA_MSIZE_16; \
1/*
2 * Core driver for the Synopsys DesignWare DMA Controller
3 *
4 * Copyright (C) 2007-2008 Atmel Corporation
5 * Copyright (C) 2010-2011 ST Microelectronics
6 * Copyright (C) 2013 Intel Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify

--- 31 unchanged lines hidden ---

40#define DWC_DEFAULT_CTLLO(_chan) ({ \
41 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
42 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
43 bool _is_slave = is_slave_direction(_dwc->direction); \
44 u8 _smsize = _is_slave ? _sconfig->src_maxburst : \
45 DW_DMA_MSIZE_16; \
46 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
47 DW_DMA_MSIZE_16; \
48 u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \
49 _dwc->p_master : _dwc->m_master; \
50 u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \
51 _dwc->p_master : _dwc->m_master; \
48 \
49 (DWC_CTLL_DST_MSIZE(_dmsize) \
50 | DWC_CTLL_SRC_MSIZE(_smsize) \
51 | DWC_CTLL_LLP_D_EN \
52 | DWC_CTLL_LLP_S_EN \
52 \
53 (DWC_CTLL_DST_MSIZE(_dmsize) \
54 | DWC_CTLL_SRC_MSIZE(_smsize) \
55 | DWC_CTLL_LLP_D_EN \
56 | DWC_CTLL_LLP_S_EN \
53 | DWC_CTLL_DMS(_dwc->dst_master) \
54 | DWC_CTLL_SMS(_dwc->src_master)); \
57 | DWC_CTLL_DMS(_dms) \
58 | DWC_CTLL_SMS(_sms)); \
55 })
56
59 })
60
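
For a memory-to-device slave channel the new form of the macro above boils down to roughly the following (illustrative expansion only; all names are the driver's own):

	u32 ctllo = DWC_CTLL_DST_MSIZE(sconfig->dst_maxburst)	/* burst on the peripheral side */
		  | DWC_CTLL_SRC_MSIZE(sconfig->src_maxburst)	/* burst on the memory side */
		  | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN	/* follow the LLP chain */
		  | DWC_CTLL_DMS(dwc->p_master)			/* destination uses the peripheral master */
		  | DWC_CTLL_SMS(dwc->m_master);		/* source uses the memory master */

The old form always used dwc->dst_master/dwc->src_master; the new one picks the peripheral-side master for whichever end faces the device, which is what the _dms/_sms ternaries encode.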
57/*
58 * Number of descriptors to allocate for each channel. This should be
59 * made configurable somehow; preferably, the clients (at least the
60 * ones using slave transfers) should be able to give us a hint.
61 */
62#define NR_DESCS_PER_CHANNEL 64
63
64/* The set of bus widths supported by the DMA controller */
65#define DW_DMA_BUSWIDTHS \
66 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
67 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
68 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
69 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
70
71/*----------------------------------------------------------------------*/
72
73static struct device *chan2dev(struct dma_chan *chan)
74{
75 return &chan->dev->device;
76}
77
78static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
79{
80 return to_dw_desc(dwc->active_list.next);
81}
82
61/* The set of bus widths supported by the DMA controller */
62#define DW_DMA_BUSWIDTHS \
63 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
64 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
65 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
66 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
67
68/*----------------------------------------------------------------------*/
69
70static struct device *chan2dev(struct dma_chan *chan)
71{
72 return &chan->dev->device;
73}
74
75static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
76{
77 return to_dw_desc(dwc->active_list.next);
78}
79
83static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
80static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
84{
81{
85 struct dw_desc *desc, *_desc;
86 struct dw_desc *ret = NULL;
87 unsigned int i = 0;
88 unsigned long flags;
82 struct dw_desc *desc = txd_to_dw_desc(tx);
83 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
84 dma_cookie_t cookie;
85 unsigned long flags;
89
90 spin_lock_irqsave(&dwc->lock, flags);
86
87 spin_lock_irqsave(&dwc->lock, flags);
91 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
92 i++;
93 if (async_tx_test_ack(&desc->txd)) {
94 list_del(&desc->desc_node);
95 ret = desc;
96 break;
97 }
98 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
99 }
88 cookie = dma_cookie_assign(tx);
89
90 /*
91 * REVISIT: We should attempt to chain as many descriptors as
92 * possible, perhaps even appending to those already submitted
93 * for DMA. But this is hard to do in a race-free manner.
94 */
95
96 list_add_tail(&desc->desc_node, &dwc->queue);
100 spin_unlock_irqrestore(&dwc->lock, flags);
97 spin_unlock_irqrestore(&dwc->lock, flags);
98 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
99 __func__, desc->txd.cookie);
101
100
102 dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
101 return cookie;
102}
103
103
104 return ret;
104static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
105{
106 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
107 struct dw_desc *desc;
108 dma_addr_t phys;
109
110 desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
111 if (!desc)
112 return NULL;
113
114 dwc->descs_allocated++;
115 INIT_LIST_HEAD(&desc->tx_list);
116 dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
117 desc->txd.tx_submit = dwc_tx_submit;
118 desc->txd.flags = DMA_CTRL_ACK;
119 desc->txd.phys = phys;
120 return desc;
105}
106
121}
122
107/*
108 * Move a descriptor, including any children, to the free list.
109 * `desc' must not be on any lists.
110 */
111static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
112{
123static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
124{
113 unsigned long flags;
125 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
126 struct dw_desc *child, *_next;
114
127
115 if (desc) {
116 struct dw_desc *child;
128 if (unlikely(!desc))
129 return;
117
130
118 spin_lock_irqsave(&dwc->lock, flags);
119 list_for_each_entry(child, &desc->tx_list, desc_node)
120 dev_vdbg(chan2dev(&dwc->chan),
121 "moving child desc %p to freelist\n",
122 child);
123 list_splice_init(&desc->tx_list, &dwc->free_list);
124 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
125 list_add(&desc->desc_node, &dwc->free_list);
126 spin_unlock_irqrestore(&dwc->lock, flags);
131 list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
132 list_del(&child->desc_node);
133 dma_pool_free(dw->desc_pool, child, child->txd.phys);
134 dwc->descs_allocated--;
127 }
135 }
136
137 dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
138 dwc->descs_allocated--;
128}
129
130static void dwc_initialize(struct dw_dma_chan *dwc)
131{
132 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
133 u32 cfghi = DWC_CFGH_FIFO_MODE;
134 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
135
139}
140
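
A note on the allocation change above: each struct dw_desc begins with the in-memory linked-list item (LLI) that the controller itself fetches, so descriptors now come straight from a DMA-coherent dma_pool and desc->txd.phys records the bus address of that item. That address is exactly what the prep routines later chain into the previous block, roughly:

	lli_write(prev, llp, desc->txd.phys | lms);		/* hardware follows LLP to the next LLI */
	list_add_tail(&desc->desc_node, &first->tx_list);	/* driver-side bookkeeping of children */

dwc_desc_put() undoes both: children are unlinked from tx_list and returned to the pool, then the parent follows.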
141static void dwc_initialize(struct dw_dma_chan *dwc)
142{
143 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
144 u32 cfghi = DWC_CFGH_FIFO_MODE;
145 u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
146
136 if (dwc->initialized == true)
147 if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
137 return;
138
139 cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
140 cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
141
142 channel_writel(dwc, CFG_LO, cfglo);
143 channel_writel(dwc, CFG_HI, cfghi);
144
145 /* Enable interrupts */
146 channel_set_bit(dw, MASK.XFER, dwc->mask);
147 channel_set_bit(dw, MASK.ERROR, dwc->mask);
148
148 return;
149
150 cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
151 cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
152
153 channel_writel(dwc, CFG_LO, cfglo);
154 channel_writel(dwc, CFG_HI, cfghi);
155
156 /* Enable interrupts */
157 channel_set_bit(dw, MASK.XFER, dwc->mask);
158 channel_set_bit(dw, MASK.ERROR, dwc->mask);
159
149 dwc->initialized = true;
160 set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
150}
151
152/*----------------------------------------------------------------------*/
153
161}
162
163/*----------------------------------------------------------------------*/
164
154static inline unsigned int dwc_fast_ffs(unsigned long long v)
155{
156 /*
157 * We can be a lot more clever here, but this should take care
158 * of the most common optimization.
159 */
160 if (!(v & 7))
161 return 3;
162 else if (!(v & 3))
163 return 2;
164 else if (!(v & 1))
165 return 1;
166 return 0;
167}
168
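
A worked example of the width selection this helper feeds (and that the new code does with a plain __ffs() in dwc_prep_dma_memcpy(), since dw->data_width[] now holds the master width in bytes): for src = 0x1004, dest = 0x2008, len = 0x100 on a 32-bit master,

	unsigned int width = __ffs(4 | 0x1004 | 0x2008 | 0x100);	/* = 2, i.e. 32-bit transfers */

which matches min(bus width, dwc_fast_ffs(0x1004 | 0x2008 | 0x100)) in the old code; ORing the byte-sized bus width into the argument is what caps the result without a separate min_t().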
169static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
170{
171 dev_err(chan2dev(&dwc->chan),
172 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
173 channel_readl(dwc, SAR),
174 channel_readl(dwc, DAR),
175 channel_readl(dwc, LLP),
176 channel_readl(dwc, CTL_HI),

--- 15 unchanged lines hidden ---

192{
193 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
194 u32 ctllo;
195
196 /*
197 * Software emulation of LLP mode relies on interrupts to continue
198 * multi block transfer.
199 */
165static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
166{
167 dev_err(chan2dev(&dwc->chan),
168 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
169 channel_readl(dwc, SAR),
170 channel_readl(dwc, DAR),
171 channel_readl(dwc, LLP),
172 channel_readl(dwc, CTL_HI),

--- 15 unchanged lines hidden ---

188{
189 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
190 u32 ctllo;
191
192 /*
193 * Software emulation of LLP mode relies on interrupts to continue
194 * multi block transfer.
195 */
200 ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
196 ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;
201
197
202 channel_writel(dwc, SAR, desc->lli.sar);
203 channel_writel(dwc, DAR, desc->lli.dar);
198 channel_writel(dwc, SAR, lli_read(desc, sar));
199 channel_writel(dwc, DAR, lli_read(desc, dar));
204 channel_writel(dwc, CTL_LO, ctllo);
200 channel_writel(dwc, CTL_LO, ctllo);
205 channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
201 channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
206 channel_set_bit(dw, CH_EN, dwc->mask);
207
208 /* Move pointer to next descriptor */
209 dwc->tx_node_active = dwc->tx_node_active->next;
210}
211
212/* Called with dwc->lock held and bh disabled */
213static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
214{
215 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
202 channel_set_bit(dw, CH_EN, dwc->mask);
203
204 /* Move pointer to next descriptor */
205 dwc->tx_node_active = dwc->tx_node_active->next;
206}
207
208/* Called with dwc->lock held and bh disabled */
209static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
210{
211 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
212 u8 lms = DWC_LLP_LMS(dwc->m_master);
216 unsigned long was_soft_llp;
217
218 /* ASSERT: channel is idle */
219 if (dma_readl(dw, CH_EN) & dwc->mask) {
220 dev_err(chan2dev(&dwc->chan),
221 "%s: BUG: Attempted to start non-idle channel\n",
222 __func__);
223 dwc_dump_chan_regs(dwc);

--- 8 unchanged lines hidden ---

232 if (was_soft_llp) {
233 dev_err(chan2dev(&dwc->chan),
234 "BUG: Attempted to start new LLP transfer inside ongoing one\n");
235 return;
236 }
237
238 dwc_initialize(dwc);
239
213 unsigned long was_soft_llp;
214
215 /* ASSERT: channel is idle */
216 if (dma_readl(dw, CH_EN) & dwc->mask) {
217 dev_err(chan2dev(&dwc->chan),
218 "%s: BUG: Attempted to start non-idle channel\n",
219 __func__);
220 dwc_dump_chan_regs(dwc);

--- 8 unchanged lines hidden ---

229 if (was_soft_llp) {
230 dev_err(chan2dev(&dwc->chan),
231 "BUG: Attempted to start new LLP transfer inside ongoing one\n");
232 return;
233 }
234
235 dwc_initialize(dwc);
236
240 dwc->residue = first->total_len;
237 first->residue = first->total_len;
241 dwc->tx_node_active = &first->tx_list;
242
243 /* Submit first block */
244 dwc_do_single_block(dwc, first);
245
246 return;
247 }
248
249 dwc_initialize(dwc);
250
238 dwc->tx_node_active = &first->tx_list;
239
240 /* Submit first block */
241 dwc_do_single_block(dwc, first);
242
243 return;
244 }
245
246 dwc_initialize(dwc);
247
251 channel_writel(dwc, LLP, first->txd.phys);
252 channel_writel(dwc, CTL_LO,
253 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
248 channel_writel(dwc, LLP, first->txd.phys | lms);
249 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
254 channel_writel(dwc, CTL_HI, 0);
255 channel_set_bit(dw, CH_EN, dwc->mask);
256}
257
258static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
259{
260 struct dw_desc *desc;
261

--- 26 unchanged lines hidden ---

288 callback = txd->callback;
289 param = txd->callback_param;
290 }
291
292 /* async_tx_ack */
293 list_for_each_entry(child, &desc->tx_list, desc_node)
294 async_tx_ack(&child->txd);
295 async_tx_ack(&desc->txd);
250 channel_writel(dwc, CTL_HI, 0);
251 channel_set_bit(dw, CH_EN, dwc->mask);
252}
253
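
The lms value ORed into LLP above is why this revision introduces DWC_LLP_LMS()/DWC_LLP_LOC(): on the DesignWare AHB DMAC the two low bits of the LLP register select the AHB master used to fetch linked-list items and the remaining bits hold the item address. A minimal sketch of what those helpers are assumed to be in the driver's regs.h:

	#define DWC_LLP_LMS(x)	((x) & 3)	/* list master select, LLP[1:0] */
	#define DWC_LLP_LOC(x)	((x) & ~3)	/* linked-list item address, LLP[31:2] */

This is also why dwc_scan_descriptors() now compares descriptor addresses against DWC_LLP_LOC(llp) and why the multi-block probe in dw_dma_probe() writes DWC_LLP_LOC(0xffffffff) instead of the bare 0xfffffffc constant.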
254static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
255{
256 struct dw_desc *desc;
257

--- 26 unchanged lines hidden ---

284 callback = txd->callback;
285 param = txd->callback_param;
286 }
287
288 /* async_tx_ack */
289 list_for_each_entry(child, &desc->tx_list, desc_node)
290 async_tx_ack(&child->txd);
291 async_tx_ack(&desc->txd);
296
297 list_splice_init(&desc->tx_list, &dwc->free_list);
298 list_move(&desc->desc_node, &dwc->free_list);
299
300 dma_descriptor_unmap(txd);
292 dwc_desc_put(dwc, desc);
301 spin_unlock_irqrestore(&dwc->lock, flags);
302
303 if (callback)
304 callback(param);
305}
306
307static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
308{

--- 54 unchanged lines hidden ---

363 /*
364 * We are inside first active descriptor.
365 * Otherwise something is really wrong.
366 */
367 desc = dwc_first_active(dwc);
368
369 head = &desc->tx_list;
370 if (active != head) {
293 spin_unlock_irqrestore(&dwc->lock, flags);
294
295 if (callback)
296 callback(param);
297}
298
299static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
300{

--- 54 unchanged lines hidden ---

355 /*
356 * We are inside first active descriptor.
357 * Otherwise something is really wrong.
358 */
359 desc = dwc_first_active(dwc);
360
361 head = &desc->tx_list;
362 if (active != head) {
371 /* Update desc to reflect last sent one */
372 if (active != head->next)
373 desc = to_dw_desc(active->prev);
363 /* Update residue to reflect last sent descriptor */
364 if (active == head->next)
365 desc->residue -= desc->len;
366 else
367 desc->residue -= to_dw_desc(active->prev)->len;
374
368
375 dwc->residue -= desc->len;
376
377 child = to_dw_desc(active);
378
379 /* Submit next block */
380 dwc_do_single_block(dwc, child);
381
382 spin_unlock_irqrestore(&dwc->lock, flags);
383 return;
384 }
385
386 /* We are done here */
387 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
388 }
389
369 child = to_dw_desc(active);
370
371 /* Submit next block */
372 dwc_do_single_block(dwc, child);
373
374 spin_unlock_irqrestore(&dwc->lock, flags);
375 return;
376 }
377
378 /* We are done here */
379 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
380 }
381
390 dwc->residue = 0;
391
392 spin_unlock_irqrestore(&dwc->lock, flags);
393
394 dwc_complete_all(dw, dwc);
395 return;
396 }
397
398 if (list_empty(&dwc->active_list)) {
382 spin_unlock_irqrestore(&dwc->lock, flags);
383
384 dwc_complete_all(dw, dwc);
385 return;
386 }
387
388 if (list_empty(&dwc->active_list)) {
399 dwc->residue = 0;
400 spin_unlock_irqrestore(&dwc->lock, flags);
401 return;
402 }
403
404 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
405 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
406 spin_unlock_irqrestore(&dwc->lock, flags);
407 return;
408 }
409
410 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
411
412 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
413 /* Initial residue value */
389 spin_unlock_irqrestore(&dwc->lock, flags);
390 return;
391 }
392
393 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
394 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
395 spin_unlock_irqrestore(&dwc->lock, flags);
396 return;
397 }
398
399 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
400
401 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
402 /* Initial residue value */
414 dwc->residue = desc->total_len;
403 desc->residue = desc->total_len;
415
416 /* Check first descriptors addr */
404
405 /* Check first descriptors addr */
417 if (desc->txd.phys == llp) {
406 if (desc->txd.phys == DWC_LLP_LOC(llp)) {
418 spin_unlock_irqrestore(&dwc->lock, flags);
419 return;
420 }
421
422 /* Check first descriptors llp */
407 spin_unlock_irqrestore(&dwc->lock, flags);
408 return;
409 }
410
411 /* Check first descriptors llp */
423 if (desc->lli.llp == llp) {
412 if (lli_read(desc, llp) == llp) {
424 /* This one is currently in progress */
413 /* This one is currently in progress */
425 dwc->residue -= dwc_get_sent(dwc);
414 desc->residue -= dwc_get_sent(dwc);
426 spin_unlock_irqrestore(&dwc->lock, flags);
427 return;
428 }
429
415 spin_unlock_irqrestore(&dwc->lock, flags);
416 return;
417 }
418
430 dwc->residue -= desc->len;
419 desc->residue -= desc->len;
431 list_for_each_entry(child, &desc->tx_list, desc_node) {
420 list_for_each_entry(child, &desc->tx_list, desc_node) {
432 if (child->lli.llp == llp) {
421 if (lli_read(child, llp) == llp) {
433 /* Currently in progress */
422 /* Currently in progress */
434 dwc->residue -= dwc_get_sent(dwc);
423 desc->residue -= dwc_get_sent(dwc);
435 spin_unlock_irqrestore(&dwc->lock, flags);
436 return;
437 }
424 spin_unlock_irqrestore(&dwc->lock, flags);
425 return;
426 }
438 dwc->residue -= child->len;
427 desc->residue -= child->len;
439 }
440
441 /*
442 * No descriptors so far seem to be in progress, i.e.
443 * this one must be done.
444 */
445 spin_unlock_irqrestore(&dwc->lock, flags);
446 dwc_descriptor_complete(dwc, desc, true);

--- 5 unchanged lines hidden ---

452
453 /* Try to continue after resetting the channel... */
454 dwc_chan_disable(dw, dwc);
455
456 dwc_dostart_first_queued(dwc);
457 spin_unlock_irqrestore(&dwc->lock, flags);
458}
459
428 }
429
430 /*
431 * No descriptors so far seem to be in progress, i.e.
432 * this one must be done.
433 */
434 spin_unlock_irqrestore(&dwc->lock, flags);
435 dwc_descriptor_complete(dwc, desc, true);

--- 5 unchanged lines hidden ---

441
442 /* Try to continue after resetting the channel... */
443 dwc_chan_disable(dw, dwc);
444
445 dwc_dostart_first_queued(dwc);
446 spin_unlock_irqrestore(&dwc->lock, flags);
447}
448
460static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
449static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
461{
462 dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
450{
451 dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
463 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
452 lli_read(desc, sar),
453 lli_read(desc, dar),
454 lli_read(desc, llp),
455 lli_read(desc, ctlhi),
456 lli_read(desc, ctllo));
464}
465
466static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
467{
468 struct dw_desc *bad_desc;
469 struct dw_desc *child;
470 unsigned long flags;
471

--- 19 unchanged lines hidden ---

491 * WARN may seem harsh, but since this only happens
492 * when someone submits a bad physical address in a
493 * descriptor, we should consider ourselves lucky that the
494 * controller flagged an error instead of scribbling over
495 * random memory locations.
496 */
497 dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
498 " cookie: %d\n", bad_desc->txd.cookie);
457}
458
459static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
460{
461 struct dw_desc *bad_desc;
462 struct dw_desc *child;
463 unsigned long flags;
464

--- 19 unchanged lines hidden ---

484 * WARN may seem harsh, but since this only happens
485 * when someone submits a bad physical address in a
486 * descriptor, we should consider ourselves lucky that the
487 * controller flagged an error instead of scribbling over
488 * random memory locations.
489 */
490 dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
491 " cookie: %d\n", bad_desc->txd.cookie);
499 dwc_dump_lli(dwc, &bad_desc->lli);
492 dwc_dump_lli(dwc, bad_desc);
500 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
493 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
501 dwc_dump_lli(dwc, &child->lli);
494 dwc_dump_lli(dwc, child);
502
503 spin_unlock_irqrestore(&dwc->lock, flags);
504
505 /* Pretend the descriptor completed successfully */
506 dwc_descriptor_complete(dwc, bad_desc, true);
507}
508
509/* --------------------- Cyclic DMA API extensions -------------------- */

--- 34 unchanged lines hidden ---

544 }
545
546 /*
547 * Error and transfer complete are highly unlikely, and will most
548 * likely be due to a configuration error by the user.
549 */
550 if (unlikely(status_err & dwc->mask) ||
551 unlikely(status_xfer & dwc->mask)) {
495
496 spin_unlock_irqrestore(&dwc->lock, flags);
497
498 /* Pretend the descriptor completed successfully */
499 dwc_descriptor_complete(dwc, bad_desc, true);
500}
501
502/* --------------------- Cyclic DMA API extensions -------------------- */

--- 34 unchanged lines hidden ---

537 }
538
539 /*
540 * Error and transfer complete are highly unlikely, and will most
541 * likely be due to a configuration error by the user.
542 */
543 if (unlikely(status_err & dwc->mask) ||
544 unlikely(status_xfer & dwc->mask)) {
552 int i;
545 unsigned int i;
553
554 dev_err(chan2dev(&dwc->chan),
555 "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
556 status_xfer ? "xfer" : "error");
557
558 spin_lock_irqsave(&dwc->lock, flags);
559
560 dwc_dump_chan_regs(dwc);

--- 5 unchanged lines hidden ---

566 channel_writel(dwc, CTL_LO, 0);
567 channel_writel(dwc, CTL_HI, 0);
568
569 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
570 dma_writel(dw, CLEAR.ERROR, dwc->mask);
571 dma_writel(dw, CLEAR.XFER, dwc->mask);
572
573 for (i = 0; i < dwc->cdesc->periods; i++)
546
547 dev_err(chan2dev(&dwc->chan),
548 "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
549 status_xfer ? "xfer" : "error");
550
551 spin_lock_irqsave(&dwc->lock, flags);
552
553 dwc_dump_chan_regs(dwc);

--- 5 unchanged lines hidden ---

559 channel_writel(dwc, CTL_LO, 0);
560 channel_writel(dwc, CTL_HI, 0);
561
562 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
563 dma_writel(dw, CLEAR.ERROR, dwc->mask);
564 dma_writel(dw, CLEAR.XFER, dwc->mask);
565
566 for (i = 0; i < dwc->cdesc->periods; i++)
574 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
567 dwc_dump_lli(dwc, dwc->cdesc->desc[i]);
575
576 spin_unlock_irqrestore(&dwc->lock, flags);
577 }
578
579 /* Re-enable interrupts */
580 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
581}
582
583/* ------------------------------------------------------------------------- */
584
585static void dw_dma_tasklet(unsigned long data)
586{
587 struct dw_dma *dw = (struct dw_dma *)data;
588 struct dw_dma_chan *dwc;
589 u32 status_block;
590 u32 status_xfer;
591 u32 status_err;
568
569 spin_unlock_irqrestore(&dwc->lock, flags);
570 }
571
572 /* Re-enable interrupts */
573 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
574}
575
576/* ------------------------------------------------------------------------- */
577
578static void dw_dma_tasklet(unsigned long data)
579{
580 struct dw_dma *dw = (struct dw_dma *)data;
581 struct dw_dma_chan *dwc;
582 u32 status_block;
583 u32 status_xfer;
584 u32 status_err;
592 int i;
585 unsigned int i;
593
594 status_block = dma_readl(dw, RAW.BLOCK);
595 status_xfer = dma_readl(dw, RAW.XFER);
596 status_err = dma_readl(dw, RAW.ERROR);
597
598 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
599
600 for (i = 0; i < dw->dma.chancnt; i++) {

--- 52 unchanged lines hidden ---

653
654 tasklet_schedule(&dw->tasklet);
655
656 return IRQ_HANDLED;
657}
658
659/*----------------------------------------------------------------------*/
660
586
587 status_block = dma_readl(dw, RAW.BLOCK);
588 status_xfer = dma_readl(dw, RAW.XFER);
589 status_err = dma_readl(dw, RAW.ERROR);
590
591 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
592
593 for (i = 0; i < dw->dma.chancnt; i++) {

--- 52 unchanged lines hidden ---

646
647 tasklet_schedule(&dw->tasklet);
648
649 return IRQ_HANDLED;
650}
651
652/*----------------------------------------------------------------------*/
653
661static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
662{
663 struct dw_desc *desc = txd_to_dw_desc(tx);
664 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
665 dma_cookie_t cookie;
666 unsigned long flags;
667
668 spin_lock_irqsave(&dwc->lock, flags);
669 cookie = dma_cookie_assign(tx);
670
671 /*
672 * REVISIT: We should attempt to chain as many descriptors as
673 * possible, perhaps even appending to those already submitted
674 * for DMA. But this is hard to do in a race-free manner.
675 */
676
677 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
678 list_add_tail(&desc->desc_node, &dwc->queue);
679
680 spin_unlock_irqrestore(&dwc->lock, flags);
681
682 return cookie;
683}
684
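
dwc_tx_submit() above only assigns a cookie and parks the descriptor on dwc->queue; the hardware is not touched until the client calls dma_async_issue_pending(), which lands in dwc_issue_pending()/dwc_dostart_first_queued(). A minimal client-side sequence through the generic dmaengine API (the callback, context and address variables are hypothetical):

	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
						    len, DMA_PREP_INTERRUPT);
	if (txd) {
		txd->callback = my_memcpy_done;		/* hypothetical completion hook */
		txd->callback_param = my_ctx;
		cookie = dmaengine_submit(txd);		/* ends up in dwc_tx_submit() */
		dma_async_issue_pending(chan);		/* ends up in dwc_issue_pending() */
	}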
685static struct dma_async_tx_descriptor *
686dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
687 size_t len, unsigned long flags)
688{
689 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
690 struct dw_dma *dw = to_dw_dma(chan->device);
691 struct dw_desc *desc;
692 struct dw_desc *first;
693 struct dw_desc *prev;
694 size_t xfer_count;
695 size_t offset;
654static struct dma_async_tx_descriptor *
655dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
656 size_t len, unsigned long flags)
657{
658 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
659 struct dw_dma *dw = to_dw_dma(chan->device);
660 struct dw_desc *desc;
661 struct dw_desc *first;
662 struct dw_desc *prev;
663 size_t xfer_count;
664 size_t offset;
665 u8 m_master = dwc->m_master;
696 unsigned int src_width;
697 unsigned int dst_width;
666 unsigned int src_width;
667 unsigned int dst_width;
698 unsigned int data_width;
668 unsigned int data_width = dw->data_width[m_master];
699 u32 ctllo;
669 u32 ctllo;
670 u8 lms = DWC_LLP_LMS(m_master);
700
701 dev_vdbg(chan2dev(chan),
702 "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
703 &dest, &src, len, flags);
704
705 if (unlikely(!len)) {
706 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
707 return NULL;
708 }
709
710 dwc->direction = DMA_MEM_TO_MEM;
711
671
672 dev_vdbg(chan2dev(chan),
673 "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
674 &dest, &src, len, flags);
675
676 if (unlikely(!len)) {
677 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
678 return NULL;
679 }
680
681 dwc->direction = DMA_MEM_TO_MEM;
682
712 data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
713 dw->data_width[dwc->dst_master]);
683 src_width = dst_width = __ffs(data_width | src | dest | len);
714
684
715 src_width = dst_width = min_t(unsigned int, data_width,
716 dwc_fast_ffs(src | dest | len));
717
718 ctllo = DWC_DEFAULT_CTLLO(chan)
719 | DWC_CTLL_DST_WIDTH(dst_width)
720 | DWC_CTLL_SRC_WIDTH(src_width)
721 | DWC_CTLL_DST_INC
722 | DWC_CTLL_SRC_INC
723 | DWC_CTLL_FC_M2M;
724 prev = first = NULL;
725
726 for (offset = 0; offset < len; offset += xfer_count << src_width) {
727 xfer_count = min_t(size_t, (len - offset) >> src_width,
728 dwc->block_size);
729
730 desc = dwc_desc_get(dwc);
731 if (!desc)
732 goto err_desc_get;
733
685 ctllo = DWC_DEFAULT_CTLLO(chan)
686 | DWC_CTLL_DST_WIDTH(dst_width)
687 | DWC_CTLL_SRC_WIDTH(src_width)
688 | DWC_CTLL_DST_INC
689 | DWC_CTLL_SRC_INC
690 | DWC_CTLL_FC_M2M;
691 prev = first = NULL;
692
693 for (offset = 0; offset < len; offset += xfer_count << src_width) {
694 xfer_count = min_t(size_t, (len - offset) >> src_width,
695 dwc->block_size);
696
697 desc = dwc_desc_get(dwc);
698 if (!desc)
699 goto err_desc_get;
700
734 desc->lli.sar = src + offset;
735 desc->lli.dar = dest + offset;
736 desc->lli.ctllo = ctllo;
737 desc->lli.ctlhi = xfer_count;
701 lli_write(desc, sar, src + offset);
702 lli_write(desc, dar, dest + offset);
703 lli_write(desc, ctllo, ctllo);
704 lli_write(desc, ctlhi, xfer_count);
738 desc->len = xfer_count << src_width;
739
740 if (!first) {
741 first = desc;
742 } else {
705 desc->len = xfer_count << src_width;
706
707 if (!first) {
708 first = desc;
709 } else {
743 prev->lli.llp = desc->txd.phys;
744 list_add_tail(&desc->desc_node,
745 &first->tx_list);
710 lli_write(prev, llp, desc->txd.phys | lms);
711 list_add_tail(&desc->desc_node, &first->tx_list);
746 }
747 prev = desc;
748 }
749
750 if (flags & DMA_PREP_INTERRUPT)
751 /* Trigger interrupt after last block */
712 }
713 prev = desc;
714 }
715
716 if (flags & DMA_PREP_INTERRUPT)
717 /* Trigger interrupt after last block */
752 prev->lli.ctllo |= DWC_CTLL_INT_EN;
718 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
753
754 prev->lli.llp = 0;
719
720 prev->lli.llp = 0;
721 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
755 first->txd.flags = flags;
756 first->total_len = len;
757
758 return &first->txd;
759
760err_desc_get:
761 dwc_desc_put(dwc, first);
762 return NULL;

--- 5 unchanged lines hidden ---

768 unsigned long flags, void *context)
769{
770 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
771 struct dw_dma *dw = to_dw_dma(chan->device);
772 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
773 struct dw_desc *prev;
774 struct dw_desc *first;
775 u32 ctllo;
722 first->txd.flags = flags;
723 first->total_len = len;
724
725 return &first->txd;
726
727err_desc_get:
728 dwc_desc_put(dwc, first);
729 return NULL;

--- 5 unchanged lines hidden ---

735 unsigned long flags, void *context)
736{
737 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
738 struct dw_dma *dw = to_dw_dma(chan->device);
739 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
740 struct dw_desc *prev;
741 struct dw_desc *first;
742 u32 ctllo;
743 u8 m_master = dwc->m_master;
744 u8 lms = DWC_LLP_LMS(m_master);
776 dma_addr_t reg;
777 unsigned int reg_width;
778 unsigned int mem_width;
745 dma_addr_t reg;
746 unsigned int reg_width;
747 unsigned int mem_width;
779 unsigned int data_width;
748 unsigned int data_width = dw->data_width[m_master];
780 unsigned int i;
781 struct scatterlist *sg;
782 size_t total_len = 0;
783
784 dev_vdbg(chan2dev(chan), "%s\n", __func__);
785
786 if (unlikely(!is_slave_direction(direction) || !sg_len))
787 return NULL;

--- 9 unchanged lines hidden ---

797 ctllo = (DWC_DEFAULT_CTLLO(chan)
798 | DWC_CTLL_DST_WIDTH(reg_width)
799 | DWC_CTLL_DST_FIX
800 | DWC_CTLL_SRC_INC);
801
802 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
803 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
804
749 unsigned int i;
750 struct scatterlist *sg;
751 size_t total_len = 0;
752
753 dev_vdbg(chan2dev(chan), "%s\n", __func__);
754
755 if (unlikely(!is_slave_direction(direction) || !sg_len))
756 return NULL;

--- 9 unchanged lines hidden ---

766 ctllo = (DWC_DEFAULT_CTLLO(chan)
767 | DWC_CTLL_DST_WIDTH(reg_width)
768 | DWC_CTLL_DST_FIX
769 | DWC_CTLL_SRC_INC);
770
771 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
772 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
773
805 data_width = dw->data_width[dwc->src_master];
806
807 for_each_sg(sgl, sg, sg_len, i) {
808 struct dw_desc *desc;
809 u32 len, dlen, mem;
810
811 mem = sg_dma_address(sg);
812 len = sg_dma_len(sg);
813
774 for_each_sg(sgl, sg, sg_len, i) {
775 struct dw_desc *desc;
776 u32 len, dlen, mem;
777
778 mem = sg_dma_address(sg);
779 len = sg_dma_len(sg);
780
814 mem_width = min_t(unsigned int,
815 data_width, dwc_fast_ffs(mem | len));
781 mem_width = __ffs(data_width | mem | len);
816
817slave_sg_todev_fill_desc:
818 desc = dwc_desc_get(dwc);
819 if (!desc)
820 goto err_desc_get;
821
782
783slave_sg_todev_fill_desc:
784 desc = dwc_desc_get(dwc);
785 if (!desc)
786 goto err_desc_get;
787
822 desc->lli.sar = mem;
823 desc->lli.dar = reg;
824 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
788 lli_write(desc, sar, mem);
789 lli_write(desc, dar, reg);
790 lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
825 if ((len >> mem_width) > dwc->block_size) {
826 dlen = dwc->block_size << mem_width;
827 mem += dlen;
828 len -= dlen;
829 } else {
830 dlen = len;
831 len = 0;
832 }
833
791 if ((len >> mem_width) > dwc->block_size) {
792 dlen = dwc->block_size << mem_width;
793 mem += dlen;
794 len -= dlen;
795 } else {
796 dlen = len;
797 len = 0;
798 }
799
834 desc->lli.ctlhi = dlen >> mem_width;
800 lli_write(desc, ctlhi, dlen >> mem_width);
835 desc->len = dlen;
836
837 if (!first) {
838 first = desc;
839 } else {
801 desc->len = dlen;
802
803 if (!first) {
804 first = desc;
805 } else {
840 prev->lli.llp = desc->txd.phys;
841 list_add_tail(&desc->desc_node,
842 &first->tx_list);
806 lli_write(prev, llp, desc->txd.phys | lms);
807 list_add_tail(&desc->desc_node, &first->tx_list);
843 }
844 prev = desc;
845 total_len += dlen;
846
847 if (len)
848 goto slave_sg_todev_fill_desc;
849 }
850 break;
851 case DMA_DEV_TO_MEM:
852 reg_width = __ffs(sconfig->src_addr_width);
853 reg = sconfig->src_addr;
854 ctllo = (DWC_DEFAULT_CTLLO(chan)
855 | DWC_CTLL_SRC_WIDTH(reg_width)
856 | DWC_CTLL_DST_INC
857 | DWC_CTLL_SRC_FIX);
858
859 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
860 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
861
808 }
809 prev = desc;
810 total_len += dlen;
811
812 if (len)
813 goto slave_sg_todev_fill_desc;
814 }
815 break;
816 case DMA_DEV_TO_MEM:
817 reg_width = __ffs(sconfig->src_addr_width);
818 reg = sconfig->src_addr;
819 ctllo = (DWC_DEFAULT_CTLLO(chan)
820 | DWC_CTLL_SRC_WIDTH(reg_width)
821 | DWC_CTLL_DST_INC
822 | DWC_CTLL_SRC_FIX);
823
824 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
825 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
826
862 data_width = dw->data_width[dwc->dst_master];
863
864 for_each_sg(sgl, sg, sg_len, i) {
865 struct dw_desc *desc;
866 u32 len, dlen, mem;
867
868 mem = sg_dma_address(sg);
869 len = sg_dma_len(sg);
870
827 for_each_sg(sgl, sg, sg_len, i) {
828 struct dw_desc *desc;
829 u32 len, dlen, mem;
830
831 mem = sg_dma_address(sg);
832 len = sg_dma_len(sg);
833
871 mem_width = min_t(unsigned int,
872 data_width, dwc_fast_ffs(mem | len));
834 mem_width = __ffs(data_width | mem | len);
873
874slave_sg_fromdev_fill_desc:
875 desc = dwc_desc_get(dwc);
876 if (!desc)
877 goto err_desc_get;
878
835
836slave_sg_fromdev_fill_desc:
837 desc = dwc_desc_get(dwc);
838 if (!desc)
839 goto err_desc_get;
840
879 desc->lli.sar = reg;
880 desc->lli.dar = mem;
881 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
841 lli_write(desc, sar, reg);
842 lli_write(desc, dar, mem);
843 lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
882 if ((len >> reg_width) > dwc->block_size) {
883 dlen = dwc->block_size << reg_width;
884 mem += dlen;
885 len -= dlen;
886 } else {
887 dlen = len;
888 len = 0;
889 }
844 if ((len >> reg_width) > dwc->block_size) {
845 dlen = dwc->block_size << reg_width;
846 mem += dlen;
847 len -= dlen;
848 } else {
849 dlen = len;
850 len = 0;
851 }
890 desc->lli.ctlhi = dlen >> reg_width;
852 lli_write(desc, ctlhi, dlen >> reg_width);
891 desc->len = dlen;
892
893 if (!first) {
894 first = desc;
895 } else {
853 desc->len = dlen;
854
855 if (!first) {
856 first = desc;
857 } else {
896 prev->lli.llp = desc->txd.phys;
897 list_add_tail(&desc->desc_node,
898 &first->tx_list);
858 lli_write(prev, llp, desc->txd.phys | lms);
859 list_add_tail(&desc->desc_node, &first->tx_list);
899 }
900 prev = desc;
901 total_len += dlen;
902
903 if (len)
904 goto slave_sg_fromdev_fill_desc;
905 }
906 break;
907 default:
908 return NULL;
909 }
910
911 if (flags & DMA_PREP_INTERRUPT)
912 /* Trigger interrupt after last block */
860 }
861 prev = desc;
862 total_len += dlen;
863
864 if (len)
865 goto slave_sg_fromdev_fill_desc;
866 }
867 break;
868 default:
869 return NULL;
870 }
871
872 if (flags & DMA_PREP_INTERRUPT)
873 /* Trigger interrupt after last block */
913 prev->lli.ctllo |= DWC_CTLL_INT_EN;
874 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
914
915 prev->lli.llp = 0;
875
876 prev->lli.llp = 0;
877 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
916 first->total_len = total_len;
917
918 return &first->txd;
919
920err_desc_get:
921 dev_err(chan2dev(chan),
922 "not enough descriptors available. Direction %d\n", direction);
923 dwc_desc_put(dwc, first);

--- 8 unchanged lines hidden ---

932 if (dws->dma_dev != chan->device->dev)
933 return false;
934
935 /* We have to copy data since dws can be temporary storage */
936
937 dwc->src_id = dws->src_id;
938 dwc->dst_id = dws->dst_id;
939
878 first->total_len = total_len;
879
880 return &first->txd;
881
882err_desc_get:
883 dev_err(chan2dev(chan),
884 "not enough descriptors available. Direction %d\n", direction);
885 dwc_desc_put(dwc, first);

--- 8 unchanged lines hidden ---
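
Neither branch of the scatter-gather preparation above can run until the channel has been configured with dmaengine_slave_config(); that call is where sconfig->src_addr/dst_addr, the register widths and the (already converted) maxburst values used here come from. A minimal memory-to-device sketch, with a made-up scatterlist and FIFO address:

	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_phys,		/* hypothetical device FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst   = 8,			/* in items; the driver converts it to an MSIZE code */
		.device_fc      = false,		/* the DMAC is the flow controller */
	};
	struct dma_async_tx_descriptor *txd;

	if (!dmaengine_slave_config(chan, &cfg))
		txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
					      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);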

894 if (dws->dma_dev != chan->device->dev)
895 return false;
896
897 /* We have to copy data since dws can be temporary storage */
898
899 dwc->src_id = dws->src_id;
900 dwc->dst_id = dws->dst_id;
901
940 dwc->src_master = dws->src_master;
941 dwc->dst_master = dws->dst_master;
902 dwc->m_master = dws->m_master;
903 dwc->p_master = dws->p_master;
942
943 return true;
944}
945EXPORT_SYMBOL_GPL(dw_dma_filter);
946
947/*
948 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
949 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.

--- 36 unchanged lines hidden ---

986
987 spin_lock_irqsave(&dwc->lock, flags);
988
989 cfglo = channel_readl(dwc, CFG_LO);
990 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
991 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
992 udelay(2);
993
904
905 return true;
906}
907EXPORT_SYMBOL_GPL(dw_dma_filter);
908
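
dw_dma_filter() lets platform code pin a channel to a fixed handshake interface and to preferred AHB masters; the dw_dma_slave fields it copies are exactly the ones the rest of this diff consumes (src_id/dst_id end up in CFG_HI, m_master/p_master in the CTL/LLP master selects). A typical request looks roughly like this (the id and master numbers are invented):

	dma_cap_mask_t mask;
	struct dw_dma_slave dws = {
		.dma_dev  = dma_dev,	/* the DMA controller's struct device */
		.src_id   = 0,		/* hypothetical RX handshake line */
		.dst_id   = 1,		/* hypothetical TX handshake line */
		.m_master = 0,		/* memory-side AHB master */
		.p_master = 1,		/* peripheral-side AHB master */
	};
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, dw_dma_filter, &dws);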
909/*
910 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
911 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.

--- 36 unchanged lines hidden ---
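
The mapping in the comment above is simply the CTLx.SRC/DST_MSIZE register encoding (code 0 means a burst of one item, then 4, 8, 16, ...). The helper hidden in the elided lines presumably does something equivalent to this sketch (the function name is made up):

	static inline u32 dwc_burst_to_msize(u32 items)
	{
		return items > 1 ? fls(items) - 2 : 0;	/* 1->0, 4->1, 8->2, 16->3 */
	}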

948
949 spin_lock_irqsave(&dwc->lock, flags);
950
951 cfglo = channel_readl(dwc, CFG_LO);
952 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
953 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
954 udelay(2);
955
994 dwc->paused = true;
956 set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
995
996 spin_unlock_irqrestore(&dwc->lock, flags);
997
998 return 0;
999}
1000
1001static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
1002{
1003 u32 cfglo = channel_readl(dwc, CFG_LO);
1004
1005 channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
1006
957
958 spin_unlock_irqrestore(&dwc->lock, flags);
959
960 return 0;
961}
962
963static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
964{
965 u32 cfglo = channel_readl(dwc, CFG_LO);
966
967 channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
968
1007 dwc->paused = false;
969 clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
1008}
1009
1010static int dwc_resume(struct dma_chan *chan)
1011{
1012 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1013 unsigned long flags;
1014
970}
971
972static int dwc_resume(struct dma_chan *chan)
973{
974 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
975 unsigned long flags;
976
1015 if (!dwc->paused)
1016 return 0;
1017
1018 spin_lock_irqsave(&dwc->lock, flags);
1019
977 spin_lock_irqsave(&dwc->lock, flags);
978
1020 dwc_chan_resume(dwc);
979 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
980 dwc_chan_resume(dwc);
1021
1022 spin_unlock_irqrestore(&dwc->lock, flags);
1023
1024 return 0;
1025}
1026
1027static int dwc_terminate_all(struct dma_chan *chan)
1028{

--- 19 unchanged lines hidden ---

1048
1049 /* Flush all pending and queued descriptors */
1050 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1051 dwc_descriptor_complete(dwc, desc, false);
1052
1053 return 0;
1054}
1055
981
982 spin_unlock_irqrestore(&dwc->lock, flags);
983
984 return 0;
985}
986
987static int dwc_terminate_all(struct dma_chan *chan)
988{

--- 19 unchanged lines hidden ---

1008
1009 /* Flush all pending and queued descriptors */
1010 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1011 dwc_descriptor_complete(dwc, desc, false);
1012
1013 return 0;
1014}
1015
1056static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
1016static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
1057{
1017{
1018 struct dw_desc *desc;
1019
1020 list_for_each_entry(desc, &dwc->active_list, desc_node)
1021 if (desc->txd.cookie == c)
1022 return desc;
1023
1024 return NULL;
1025}
1026
1027static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
1028{
1029 struct dw_desc *desc;
1058 unsigned long flags;
1059 u32 residue;
1060
1061 spin_lock_irqsave(&dwc->lock, flags);
1062
1030 unsigned long flags;
1031 u32 residue;
1032
1033 spin_lock_irqsave(&dwc->lock, flags);
1034
1063 residue = dwc->residue;
1064 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
1065 residue -= dwc_get_sent(dwc);
1035 desc = dwc_find_desc(dwc, cookie);
1036 if (desc) {
1037 if (desc == dwc_first_active(dwc)) {
1038 residue = desc->residue;
1039 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
1040 residue -= dwc_get_sent(dwc);
1041 } else {
1042 residue = desc->total_len;
1043 }
1044 } else {
1045 residue = 0;
1046 }
1066
1067 spin_unlock_irqrestore(&dwc->lock, flags);
1068 return residue;
1069}
1070
1071static enum dma_status
1072dwc_tx_status(struct dma_chan *chan,
1073 dma_cookie_t cookie,

--- 4 unchanged lines hidden ---

1078
1079 ret = dma_cookie_status(chan, cookie, txstate);
1080 if (ret == DMA_COMPLETE)
1081 return ret;
1082
1083 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1084
1085 ret = dma_cookie_status(chan, cookie, txstate);
1047
1048 spin_unlock_irqrestore(&dwc->lock, flags);
1049 return residue;
1050}
1051
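
Residue bookkeeping moves from the channel (the old dwc->residue) to the descriptor (desc->residue) in this revision, and dwc_find_desc() makes the report cookie-specific: the active head reports what is left of it (minus the bytes already pushed out of the current block in soft-LLP mode), descriptors further back on the active list report their full total_len, and a cookie not found there reports 0. A client consumes this through the standard status call, for example:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		pr_debug("transfer %d: %u bytes still pending\n",
			 cookie, state.residue);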
1052static enum dma_status
1053dwc_tx_status(struct dma_chan *chan,
1054 dma_cookie_t cookie,

--- 4 unchanged lines hidden ---

1059
1060 ret = dma_cookie_status(chan, cookie, txstate);
1061 if (ret == DMA_COMPLETE)
1062 return ret;
1063
1064 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1065
1066 ret = dma_cookie_status(chan, cookie, txstate);
1086 if (ret != DMA_COMPLETE)
1087 dma_set_residue(txstate, dwc_get_residue(dwc));
1067 if (ret == DMA_COMPLETE)
1068 return ret;
1088
1069
1089 if (dwc->paused && ret == DMA_IN_PROGRESS)
1070 dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
1071
1072 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
1090 return DMA_PAUSED;
1091
1092 return ret;
1093}
1094
1095static void dwc_issue_pending(struct dma_chan *chan)
1096{
1097 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

--- 4 unchanged lines hidden ---

1102 dwc_dostart_first_queued(dwc);
1103 spin_unlock_irqrestore(&dwc->lock, flags);
1104}
1105
1106/*----------------------------------------------------------------------*/
1107
1108static void dw_dma_off(struct dw_dma *dw)
1109{
1073 return DMA_PAUSED;
1074
1075 return ret;
1076}
1077
1078static void dwc_issue_pending(struct dma_chan *chan)
1079{
1080 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

--- 4 unchanged lines hidden ---

1085 dwc_dostart_first_queued(dwc);
1086 spin_unlock_irqrestore(&dwc->lock, flags);
1087}
1088
1089/*----------------------------------------------------------------------*/
1090
1091static void dw_dma_off(struct dw_dma *dw)
1092{
1110 int i;
1093 unsigned int i;
1111
1112 dma_writel(dw, CFG, 0);
1113
1114 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1115 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1116 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1117 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1118 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1119
1120 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1121 cpu_relax();
1122
1123 for (i = 0; i < dw->dma.chancnt; i++)
1094
1095 dma_writel(dw, CFG, 0);
1096
1097 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1098 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1099 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1100 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1101 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1102
1103 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1104 cpu_relax();
1105
1106 for (i = 0; i < dw->dma.chancnt; i++)
1124 dw->chan[i].initialized = false;
1107 clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
1125}
1126
1127static void dw_dma_on(struct dw_dma *dw)
1128{
1129 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1130}
1131
1132static int dwc_alloc_chan_resources(struct dma_chan *chan)
1133{
1134 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1135 struct dw_dma *dw = to_dw_dma(chan->device);
1108}
1109
1110static void dw_dma_on(struct dw_dma *dw)
1111{
1112 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1113}
1114
1115static int dwc_alloc_chan_resources(struct dma_chan *chan)
1116{
1117 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1118 struct dw_dma *dw = to_dw_dma(chan->device);
1136 struct dw_desc *desc;
1137 int i;
1138 unsigned long flags;
1139
1140 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1141
1142 /* ASSERT: channel is idle */
1143 if (dma_readl(dw, CH_EN) & dwc->mask) {
1144 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1145 return -EIO;
1146 }

--- 14 unchanged lines hidden ---

1161 return -EINVAL;
1162 }
1163
1164 /* Enable controller here if needed */
1165 if (!dw->in_use)
1166 dw_dma_on(dw);
1167 dw->in_use |= dwc->mask;
1168
1119
1120 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1121
1122 /* ASSERT: channel is idle */
1123 if (dma_readl(dw, CH_EN) & dwc->mask) {
1124 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1125 return -EIO;
1126 }

--- 14 unchanged lines hidden ---

1141 return -EINVAL;
1142 }
1143
1144 /* Enable controller here if needed */
1145 if (!dw->in_use)
1146 dw_dma_on(dw);
1147 dw->in_use |= dwc->mask;
1148
1169 spin_lock_irqsave(&dwc->lock, flags);
1170 i = dwc->descs_allocated;
1171 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
1172 dma_addr_t phys;
1173
1174 spin_unlock_irqrestore(&dwc->lock, flags);
1175
1176 desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
1177 if (!desc)
1178 goto err_desc_alloc;
1179
1180 memset(desc, 0, sizeof(struct dw_desc));
1181
1182 INIT_LIST_HEAD(&desc->tx_list);
1183 dma_async_tx_descriptor_init(&desc->txd, chan);
1184 desc->txd.tx_submit = dwc_tx_submit;
1185 desc->txd.flags = DMA_CTRL_ACK;
1186 desc->txd.phys = phys;
1187
1188 dwc_desc_put(dwc, desc);
1189
1190 spin_lock_irqsave(&dwc->lock, flags);
1191 i = ++dwc->descs_allocated;
1192 }
1193
1194 spin_unlock_irqrestore(&dwc->lock, flags);
1195
1196 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1197
1198 return i;
1199
1200err_desc_alloc:
1201 dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
1202
1203 return i;
1149 return 0;
1204}
1205
1206static void dwc_free_chan_resources(struct dma_chan *chan)
1207{
1208 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1209 struct dw_dma *dw = to_dw_dma(chan->device);
1150}
1151
1152static void dwc_free_chan_resources(struct dma_chan *chan)
1153{
1154 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1155 struct dw_dma *dw = to_dw_dma(chan->device);
1210 struct dw_desc *desc, *_desc;
1211 unsigned long flags;
1212 LIST_HEAD(list);
1213
1214 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1215 dwc->descs_allocated);
1216
1217 /* ASSERT: channel is idle */
1218 BUG_ON(!list_empty(&dwc->active_list));
1219 BUG_ON(!list_empty(&dwc->queue));
1220 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1221
1222 spin_lock_irqsave(&dwc->lock, flags);
1156 unsigned long flags;
1157 LIST_HEAD(list);
1158
1159 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1160 dwc->descs_allocated);
1161
1162 /* ASSERT: channel is idle */
1163 BUG_ON(!list_empty(&dwc->active_list));
1164 BUG_ON(!list_empty(&dwc->queue));
1165 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1166
1167 spin_lock_irqsave(&dwc->lock, flags);
1223 list_splice_init(&dwc->free_list, &list);
1224 dwc->descs_allocated = 0;
1225
1226 /* Clear custom channel configuration */
1227 dwc->src_id = 0;
1228 dwc->dst_id = 0;
1229
1168
1169 /* Clear custom channel configuration */
1170 dwc->src_id = 0;
1171 dwc->dst_id = 0;
1172
1230 dwc->src_master = 0;
1231 dwc->dst_master = 0;
1173 dwc->m_master = 0;
1174 dwc->p_master = 0;
1232
1175
1233 dwc->initialized = false;
1176 clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
1234
1235 /* Disable interrupts */
1236 channel_clear_bit(dw, MASK.XFER, dwc->mask);
1237 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1238 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1239
1240 spin_unlock_irqrestore(&dwc->lock, flags);
1241
1242 /* Disable controller in case it was a last user */
1243 dw->in_use &= ~dwc->mask;
1244 if (!dw->in_use)
1245 dw_dma_off(dw);
1246
1177
1178 /* Disable interrupts */
1179 channel_clear_bit(dw, MASK.XFER, dwc->mask);
1180 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1181 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1182
1183 spin_unlock_irqrestore(&dwc->lock, flags);
1184
1185 /* Disable controller in case it was a last user */
1186 dw->in_use &= ~dwc->mask;
1187 if (!dw->in_use)
1188 dw_dma_off(dw);
1189
1247 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1248 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1249 dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
1250 }
1251
1252 dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1253}
1254
1255/* --------------------- Cyclic DMA API extensions -------------------- */
1256
1257/**
1258 * dw_dma_cyclic_start - start the cyclic DMA transfer
1259 * @chan: the DMA channel to start

--- 61 unchanged lines hidden ---

1321 enum dma_transfer_direction direction)
1322{
1323 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1324 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
1325 struct dw_cyclic_desc *cdesc;
1326 struct dw_cyclic_desc *retval = NULL;
1327 struct dw_desc *desc;
1328 struct dw_desc *last = NULL;
1190 dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1191}
1192
1193/* --------------------- Cyclic DMA API extensions -------------------- */
1194
1195/**
1196 * dw_dma_cyclic_start - start the cyclic DMA transfer
1197 * @chan: the DMA channel to start

--- 61 unchanged lines hidden ---

1259 enum dma_transfer_direction direction)
1260{
1261 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1262 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
1263 struct dw_cyclic_desc *cdesc;
1264 struct dw_cyclic_desc *retval = NULL;
1265 struct dw_desc *desc;
1266 struct dw_desc *last = NULL;
1267 u8 lms = DWC_LLP_LMS(dwc->m_master);
1329 unsigned long was_cyclic;
1330 unsigned int reg_width;
1331 unsigned int periods;
1332 unsigned int i;
1333 unsigned long flags;
1334
1335 spin_lock_irqsave(&dwc->lock, flags);
1336 if (dwc->nollp) {

--- 37 unchanged lines hidden ---

1374 goto out_err;
1375 if (unlikely(period_len & ((1 << reg_width) - 1)))
1376 goto out_err;
1377 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1378 goto out_err;
1379
1380 retval = ERR_PTR(-ENOMEM);
1381
1268 unsigned long was_cyclic;
1269 unsigned int reg_width;
1270 unsigned int periods;
1271 unsigned int i;
1272 unsigned long flags;
1273
1274 spin_lock_irqsave(&dwc->lock, flags);
1275 if (dwc->nollp) {

--- 37 unchanged lines hidden ---

1313 goto out_err;
1314 if (unlikely(period_len & ((1 << reg_width) - 1)))
1315 goto out_err;
1316 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1317 goto out_err;
1318
1319 retval = ERR_PTR(-ENOMEM);
1320
1382 if (periods > NR_DESCS_PER_CHANNEL)
1383 goto out_err;
1384
1385 cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1386 if (!cdesc)
1387 goto out_err;
1388
1389 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1390 if (!cdesc->desc)
1391 goto out_err_alloc;
1392
1393 for (i = 0; i < periods; i++) {
1394 desc = dwc_desc_get(dwc);
1395 if (!desc)
1396 goto out_err_desc_get;
1397
1398 switch (direction) {
1399 case DMA_MEM_TO_DEV:
1321 cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1322 if (!cdesc)
1323 goto out_err;
1324
1325 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1326 if (!cdesc->desc)
1327 goto out_err_alloc;
1328
1329 for (i = 0; i < periods; i++) {
1330 desc = dwc_desc_get(dwc);
1331 if (!desc)
1332 goto out_err_desc_get;
1333
1334 switch (direction) {
1335 case DMA_MEM_TO_DEV:
1400 desc->lli.dar = sconfig->dst_addr;
1401 desc->lli.sar = buf_addr + (period_len * i);
1402 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1403 | DWC_CTLL_DST_WIDTH(reg_width)
1404 | DWC_CTLL_SRC_WIDTH(reg_width)
1405 | DWC_CTLL_DST_FIX
1406 | DWC_CTLL_SRC_INC
1407 | DWC_CTLL_INT_EN);
1336 lli_write(desc, dar, sconfig->dst_addr);
1337 lli_write(desc, sar, buf_addr + period_len * i);
1338 lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
1339 | DWC_CTLL_DST_WIDTH(reg_width)
1340 | DWC_CTLL_SRC_WIDTH(reg_width)
1341 | DWC_CTLL_DST_FIX
1342 | DWC_CTLL_SRC_INC
1343 | DWC_CTLL_INT_EN));
1408
1344
1409 desc->lli.ctllo |= sconfig->device_fc ?
1410 DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1411 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
1345 lli_set(desc, ctllo, sconfig->device_fc ?
1346 DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1347 DWC_CTLL_FC(DW_DMA_FC_D_M2P));
1412
1413 break;
1414 case DMA_DEV_TO_MEM:
1348
1349 break;
1350 case DMA_DEV_TO_MEM:
1415 desc->lli.dar = buf_addr + (period_len * i);
1416 desc->lli.sar = sconfig->src_addr;
1417 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1418 | DWC_CTLL_SRC_WIDTH(reg_width)
1419 | DWC_CTLL_DST_WIDTH(reg_width)
1420 | DWC_CTLL_DST_INC
1421 | DWC_CTLL_SRC_FIX
1422 | DWC_CTLL_INT_EN);
1351 lli_write(desc, dar, buf_addr + period_len * i);
1352 lli_write(desc, sar, sconfig->src_addr);
1353 lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
1354 | DWC_CTLL_SRC_WIDTH(reg_width)
1355 | DWC_CTLL_DST_WIDTH(reg_width)
1356 | DWC_CTLL_DST_INC
1357 | DWC_CTLL_SRC_FIX
1358 | DWC_CTLL_INT_EN));
1423
1359
1424 desc->lli.ctllo |= sconfig->device_fc ?
1425 DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1426 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
1360 lli_set(desc, ctllo, sconfig->device_fc ?
1361 DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1362 DWC_CTLL_FC(DW_DMA_FC_D_P2M));
1427
1428 break;
1429 default:
1430 break;
1431 }
1432
1363
1364 break;
1365 default:
1366 break;
1367 }
1368
1433 desc->lli.ctlhi = (period_len >> reg_width);
1369 lli_write(desc, ctlhi, period_len >> reg_width);
1434 cdesc->desc[i] = desc;
1435
1436 if (last)
1370 cdesc->desc[i] = desc;
1371
1372 if (last)
1437 last->lli.llp = desc->txd.phys;
1373 lli_write(last, llp, desc->txd.phys | lms);
1438
1439 last = desc;
1440 }
1441
1442 /* Let's make a cyclic list */
1374
1375 last = desc;
1376 }
1377
1378 /* Let's make a cyclic list */
1443 last->lli.llp = cdesc->desc[0]->txd.phys;
1379 lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
1444
1445 dev_dbg(chan2dev(&dwc->chan),
1446 "cyclic prepared buf %pad len %zu period %zu periods %d\n",
1447 &buf_addr, buf_len, period_len, periods);
1448
1449 cdesc->periods = periods;
1450 dwc->cdesc = cdesc;
1451

--- 14 unchanged lines hidden (view full) ---

1466 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1467 * @chan: the DMA channel to free
1468 */
1469void dw_dma_cyclic_free(struct dma_chan *chan)
1470{
1471 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1472 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1473 struct dw_cyclic_desc *cdesc = dwc->cdesc;
1380
1381 dev_dbg(chan2dev(&dwc->chan),
1382 "cyclic prepared buf %pad len %zu period %zu periods %d\n",
1383 &buf_addr, buf_len, period_len, periods);
1384
1385 cdesc->periods = periods;
1386 dwc->cdesc = cdesc;
1387

--- 14 unchanged lines hidden (view full) ---

1402 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1403 * @chan: the DMA channel to free
1404 */
1405void dw_dma_cyclic_free(struct dma_chan *chan)
1406{
1407 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1408 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1409 struct dw_cyclic_desc *cdesc = dwc->cdesc;
1474 int i;
1410 unsigned int i;
1475 unsigned long flags;
1476
1477 dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
1478
1479 if (!cdesc)
1480 return;
1481
1482 spin_lock_irqsave(&dwc->lock, flags);

--- 7 unchanged lines hidden ---

1490 spin_unlock_irqrestore(&dwc->lock, flags);
1491
1492 for (i = 0; i < cdesc->periods; i++)
1493 dwc_desc_put(dwc, cdesc->desc[i]);
1494
1495 kfree(cdesc->desc);
1496 kfree(cdesc);
1497
1411 unsigned long flags;
1412
1413 dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
1414
1415 if (!cdesc)
1416 return;
1417
1418 spin_lock_irqsave(&dwc->lock, flags);

--- 7 unchanged lines hidden ---

1426 spin_unlock_irqrestore(&dwc->lock, flags);
1427
1428 for (i = 0; i < cdesc->periods; i++)
1429 dwc_desc_put(dwc, cdesc->desc[i]);
1430
1431 kfree(cdesc->desc);
1432 kfree(cdesc);
1433
1434 dwc->cdesc = NULL;
1435
1498 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1499}
1500EXPORT_SYMBOL(dw_dma_cyclic_free);
1501
1502/*----------------------------------------------------------------------*/
1503
1504int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1505{
1506 struct dw_dma *dw;
1507 bool autocfg = false;
1508 unsigned int dw_params;
1509 unsigned int max_blk_size = 0;
1436 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1437}
1438EXPORT_SYMBOL(dw_dma_cyclic_free);
1439
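
The cyclic helpers build a closed ring: every period gets its own descriptor with DWC_CTLL_INT_EN set, and the last descriptor's LLP points back at the first (carrying the same master-select bits), so the block interrupt fires once per period until the ring is torn down. Schematic use of this driver-private API (buffer and period sizes are made up, and the period_callback fields are assumed from struct dw_cyclic_desc):

	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf_phys, buf_len, period_len,
				   DMA_MEM_TO_DEV);
	if (!IS_ERR(cdesc)) {
		cdesc->period_callback = my_period_done;	/* assumed field, hypothetical handler */
		cdesc->period_callback_param = my_ctx;
		dw_dma_cyclic_start(chan);
		/* ... stream runs, one interrupt per period ... */
		dw_dma_cyclic_stop(chan);
		dw_dma_cyclic_free(chan);
	}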
1440/*----------------------------------------------------------------------*/
1441
1442int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1443{
1444 struct dw_dma *dw;
1445 bool autocfg = false;
1446 unsigned int dw_params;
1447 unsigned int max_blk_size = 0;
1448 unsigned int i;
1510 int err;
1449 int err;
1511 int i;
1512
1513 dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
1514 if (!dw)
1515 return -ENOMEM;
1516
1517 dw->regs = chip->regs;
1518 chip->dw = dw;
1519
1520 pm_runtime_get_sync(chip->dev);
1521
1522 if (!pdata) {
1450
1451 dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
1452 if (!dw)
1453 return -ENOMEM;
1454
1455 dw->regs = chip->regs;
1456 chip->dw = dw;
1457
1458 pm_runtime_get_sync(chip->dev);
1459
1460 if (!pdata) {
1523 dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
1461 dw_params = dma_readl(dw, DW_PARAMS);
1524 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
1525
1526 autocfg = dw_params >> DW_PARAMS_EN & 1;
1527 if (!autocfg) {
1528 err = -EINVAL;
1529 goto err_pdata;
1530 }
1531
1532 pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
1533 if (!pdata) {
1534 err = -ENOMEM;
1535 goto err_pdata;
1536 }
1537
1538 /* Get hardware configuration parameters */
1539 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
1540 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1541 for (i = 0; i < pdata->nr_masters; i++) {
1542 pdata->data_width[i] =
1462 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
1463
1464 autocfg = dw_params >> DW_PARAMS_EN & 1;
1465 if (!autocfg) {
1466 err = -EINVAL;
1467 goto err_pdata;
1468 }
1469
1470 pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
1471 if (!pdata) {
1472 err = -ENOMEM;
1473 goto err_pdata;
1474 }
1475
1476 /* Get hardware configuration parameters */
1477 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
1478 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1479 for (i = 0; i < pdata->nr_masters; i++) {
1480 pdata->data_width[i] =
1543 (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
1481 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
1544 }
1545 max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
1546
1547 /* Fill platform data with the default values */
1548 pdata->is_private = true;
1549 pdata->is_memcpy = true;
1550 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1551 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;

--- 55 unchanged lines hidden ---

1607 dwc->priority = i;
1608
1609 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1610 spin_lock_init(&dwc->lock);
1611 dwc->mask = 1 << i;
1612
1613 INIT_LIST_HEAD(&dwc->active_list);
1614 INIT_LIST_HEAD(&dwc->queue);
1482 }
1483 max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
1484
1485 /* Fill platform data with the default values */
1486 pdata->is_private = true;
1487 pdata->is_memcpy = true;
1488 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1489 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;

--- 55 unchanged lines hidden ---

1545 dwc->priority = i;
1546
1547 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1548 spin_lock_init(&dwc->lock);
1549 dwc->mask = 1 << i;
1550
1551 INIT_LIST_HEAD(&dwc->active_list);
1552 INIT_LIST_HEAD(&dwc->queue);
1615 INIT_LIST_HEAD(&dwc->free_list);
1616
1617 channel_clear_bit(dw, CH_EN, dwc->mask);
1618
1619 dwc->direction = DMA_TRANS_NONE;
1620
1621 /* Hardware configuration */
1622 if (autocfg) {
1553
1554 channel_clear_bit(dw, CH_EN, dwc->mask);
1555
1556 dwc->direction = DMA_TRANS_NONE;
1557
1558 /* Hardware configuration */
1559 if (autocfg) {
1623 unsigned int dwc_params;
1624 unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
1560 unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
1625 void __iomem *addr = chip->regs + r * sizeof(u32);
1561 void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
1562 unsigned int dwc_params = dma_readl_native(addr);
1626
1563
1627 dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
1628
1629 dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
1630 dwc_params);
1631
1632 /*
1633 * Decode maximum block size for given channel. The
1634 * stored 4 bit value represents blocks from 0x00 for 3
1635 * up to 0x0a for 4095.
1636 */
1637 dwc->block_size =
1638 (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
1639 dwc->nollp =
1640 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1641 } else {
1642 dwc->block_size = pdata->block_size;
1643
1644 /* Check if channel supports multi block transfer */
1564 dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
1565 dwc_params);
1566
1567 /*
1568 * Decode maximum block size for given channel. The
1569 * stored 4 bit value represents blocks from 0x00 for 3
1570 * up to 0x0a for 4095.
1571 */
1572 dwc->block_size =
1573 (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
1574 dwc->nollp =
1575 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1576 } else {
1577 dwc->block_size = pdata->block_size;
1578
1579 /* Check if channel supports multi block transfer */
1645 channel_writel(dwc, LLP, 0xfffffffc);
1646 dwc->nollp =
1647 (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
1580 channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff));
1581 dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0;
1648 channel_writel(dwc, LLP, 0);
1649 }
1650 }
1651
1652 /* Clear all interrupts on all channels. */
1653 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1654 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1655 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);

--- 97 unchanged lines hidden ---
1582 channel_writel(dwc, LLP, 0);
1583 }
1584 }
1585
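
Two of the autocfg decodes above changed representation and are worth spelling out: dw->data_width[] now stores the master width in bytes (4 << field) instead of the old exponent form (field + 2), and the per-channel maximum block size is still the 4-bit field from MAX_BLK_SIZE. Worked examples (field values are hypothetical):

	/* DW_PARAMS_DATA_WIDTH(i) field == 2    ->  data_width = 4 << 2        = 16 bytes */
	/* MAX_BLK_SIZE nibble for channel == 0x0  ->  block_size = (4 << 0x0) - 1 = 3     */
	/* MAX_BLK_SIZE nibble for channel == 0xa  ->  block_size = (4 << 0xa) - 1 = 4095  */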
1586 /* Clear all interrupts on all channels. */
1587 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1588 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1589 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);

--- 97 unchanged lines hidden ---