// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
{
	spin_lock(&fsl_chan->vchan.lock);

	if (!fsl_chan->edesc) {
		/* terminate_all called before */
		spin_unlock(&fsl_chan->vchan.lock);
		return;
	}

	if (!fsl_chan->edesc->iscyclic) {
		list_del(&fsl_chan->edesc->vdesc.node);
		vchan_cookie_complete(&fsl_chan->edesc->vdesc);
		fsl_chan->edesc = NULL;
		fsl_chan->status = DMA_COMPLETE;
	} else {
		vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
	}

	if (!fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock(&fsl_chan->vchan.lock);
}

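/*
 * Enable the hardware service request for a split-register (eDMA v3/v4)
 * channel: set the read/write attribute in CHn_SBR, program the channel
 * mux if the block has one, then set CHn_CSR[ERQ].
 */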
static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val, flags;

	flags = fsl_edma_drvflags(fsl_chan);
	val = edma_readl_chreg(fsl_chan, ch_sbr);
	if (fsl_chan->is_rxchan)
		val |= EDMA_V3_CH_SBR_RD;
	else
		val |= EDMA_V3_CH_SBR_WR;

	if (fsl_chan->is_remote)
		val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);

	edma_writel_chreg(fsl_chan, val, ch_sbr);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
		/*
		 * ch_mux: With the exception of 0, attempts to write a value
		 * already in use will be forced to 0.
		 */
		if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
			edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
	}

	val = edma_readl_chreg(fsl_chan, ch_csr);
	val |= EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_enable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
{
	u32 val = edma_readl_chreg(fsl_chan, ch_csr);
	u32 flags;

	flags = fsl_edma_drvflags(fsl_chan);

	if (flags & FSL_EDMA_DRV_HAS_CHMUX)
		edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);

	val &= ~EDMA_V3_CH_CSR_ERQ;
	edma_writel_chreg(fsl_chan, val, ch_csr);
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
		return fsl_edma3_disable_request(fsl_chan);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}

static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}

void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	if (!dmamux_nr)
		return;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}

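/*
 * Encode a bus width into the TCD ATTR field: the eDMA SSIZE/DSIZE encoding
 * is log2 of the access width, and the same value is used for both the
 * source and destination size fields.
 */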
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	u32 val;

	if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	val = ffs(addr_width) - 1;
	return val | (val << 8);
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}

int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->status = DMA_COMPLETE;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
		pm_runtime_allow(fsl_chan->pd_dev);

	return 0;
}

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}

static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}

static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr, old_addr;
	size_t len, size;
	u32 nbytes = 0;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
		len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);
	}

	if (!in_progress)
		return len;

	/* A 64-bit read is not atomic: retry until the upper 32 bits are stable */
	do {
		if (dir == DMA_MEM_TO_DEV) {
			old_addr = edma_read_tcdreg(fsl_chan, saddr);
			cur_addr = edma_read_tcdreg(fsl_chan, saddr);
		} else {
			old_addr = edma_read_tcdreg(fsl_chan, daddr);
			cur_addr = edma_read_tcdreg(fsl_chan, daddr);
		}
	} while (upper_32_bits(cur_addr) != upper_32_bits(old_addr));

	/* find the TCD that is still in flight and compute the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes);
		if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
			nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);

		size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);

		if (dir == DMA_MEM_TO_DEV)
			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, saddr);
		else
			dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}

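/*
 * Load a software TCD into the channel's TCD registers. CSR is cleared
 * first and written last so that the channel cannot start from a
 * partially-programmed TCD.
 */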
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd)
{
	u16 csr = 0;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, the TCD registers must be loaded in big-
	 * or little-endian format according to the eDMA engine's endianness
	 * model; the specific edma_write helpers take care of that.
	 */
	edma_write_tcdreg(fsl_chan, 0, csr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, saddr);
	edma_cp_tcd_to_reg(fsl_chan, tcd, daddr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, attr);
	edma_cp_tcd_to_reg(fsl_chan, tcd, soff);

	edma_cp_tcd_to_reg(fsl_chan, tcd, nbytes);
	edma_cp_tcd_to_reg(fsl_chan, tcd, slast);

	edma_cp_tcd_to_reg(fsl_chan, tcd, citer);
	edma_cp_tcd_to_reg(fsl_chan, tcd, biter);
	edma_cp_tcd_to_reg(fsl_chan, tcd, doff);

	edma_cp_tcd_to_reg(fsl_chan, tcd, dlast_sga);

	csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr);

	if (fsl_chan->is_sw) {
		csr |= EDMA_TCD_CSR_START;
		fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
	}

	/*
	 * On eDMAv3 the CHn_CSR[DONE] bit must be cleared before enabling
	 * TCDn_CSR[ESG]; eDMAv4 has no such requirement. Changing MLINK
	 * requires clearing CHn_CSR[DONE] on both eDMAv3 and eDMAv4.
	 */
	if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
		(csr & EDMA_TCD_CSR_E_SG)) ||
	    ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
		(csr & EDMA_TCD_CSR_E_LINK)))
		edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);

	edma_cp_tcd_to_reg(fsl_chan, tcd, csr);
}

static inline
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
		       struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst,
		       u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer,
		       u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	struct dma_slave_config *cfg = &fsl_chan->cfg;
	u32 burst = 0;
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little endian
	 * format irrespective of the register endian model, so the values
	 * are stored in little endian here and fsl_edma_set_tcd_regs does
	 * the swap when loading the registers.
	 */
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, src, saddr);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dst, daddr);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, attr, attr);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);

	/*
	 * If either multi_fifo or a port window size is in use, the minor
	 * loop offset is enabled: bits 29-10 carry the address offset and
	 * bits 9-0 tell the DMA how much data to read from addr.
	 * If neither is in use, the whole NBYTES field holds the minor loop
	 * byte count.
	 */
	if (cfg->direction == DMA_MEM_TO_DEV) {
		if (fsl_chan->is_multi_fifo)
			burst = cfg->dst_maxburst * 4;
		if (cfg->dst_port_window_size)
			burst = cfg->dst_port_window_size * cfg->dst_addr_width;
		if (burst) {
			nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
			nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
		}
	} else {
		if (fsl_chan->is_multi_fifo)
			burst = cfg->src_maxburst * 4;
		if (cfg->src_port_window_size)
			burst = cfg->src_port_window_size * cfg->src_addr_width;
		if (burst) {
			nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
			nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
			nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
		}
	}

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, nbytes, nbytes);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, slast, slast);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_CITER_CITER(citer), citer);
	fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, dlast_sga, dlast_sga);

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter);

	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	if (fsl_chan->is_rxchan)
		csr |= EDMA_TCD_CSR_ACTIVE;

	if (fsl_chan->is_sw)
		csr |= EDMA_TCD_CSR_START;

	fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);

	trace_edma_fill_tcd(fsl_chan, tcd);
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

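/*
 * Prepare a cyclic transfer: one TCD is built per period and each TCD's
 * dlast_sga points at the next TCD in the ring (the last one wraps back to
 * the first), so the channel keeps scatter-gathering around the buffer
 * until it is terminated.
 */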
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	bool major_int = true;
	int sg_len, i;
	dma_addr_t src_addr, dst_addr, last_sg;
	u16 soff, doff, iter;
	u32 nbytes;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = fsl_chan->is_multi_fifo ? 4 : 0;
			if (fsl_chan->cfg.dst_port_window_size)
				doff = fsl_chan->cfg.dst_addr_width;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = fsl_chan->is_multi_fifo ? 4 : 0;
			doff = fsl_chan->cfg.src_addr_width;
			if (fsl_chan->cfg.src_port_window_size)
				soff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = doff = 0;
			major_int = false;
		}

		fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, major_int, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

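/*
 * Prepare a slave scatter-gather transfer: one TCD per sg entry, chained
 * through dlast_sga with E_SG set. Only the last TCD raises the major-loop
 * interrupt and disables the hardware request when it completes.
 */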
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	dma_addr_t src_addr, dst_addr, last_sg;
	u16 soff, doff, iter;
	u32 nbytes;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else if (direction == DMA_DEV_TO_MEM) {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		} else {
			/* DMA_DEV_TO_DEV */
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = 0;
			doff = 0;
		}

		/*
		 * If sg_dma_len is not a multiple of the burst length, pick a
		 * smaller burst so that the whole transfer length is a
		 * multiple of the minor loop (burst) size.
		 */
		if (sg_dma_len(sg) % nbytes) {
			u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
			u32 burst = (direction == DMA_DEV_TO_MEM) ?
						fsl_chan->cfg.src_maxburst :
						fsl_chan->cfg.dst_maxburst;
			int j;

			for (j = burst; j > 1; j--) {
				if (!(sg_dma_len(sg) % (j * width))) {
					nbytes = j * width;
					break;
				}
			}
			/* Set burst size as 1 if there's no suitable one */
			if (j == 1)
				nbytes = width;
		}
		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
						     dma_addr_t dma_dst, dma_addr_t dma_src,
						     size_t len, unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	fsl_chan->is_sw = true;
	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
		fsl_chan->is_remote = true;

	/* copy_align and max_seg_size are set up so that one TCD is enough */
	fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
			  fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
			  32, len, 0, 1, 1, 32, 0, true, true, false);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
}

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}

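/*
 * Allocate per-channel resources: enable the optional channel clock, create
 * the TCD DMA pool (64-bit or 32-bit TCD layout depending on the hardware)
 * and request the per-channel interrupt if one is provided.
 */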
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	int ret;

	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
		clk_prepare_enable(fsl_chan->clk);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
				sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
				32, 0);

	if (fsl_chan->txirq) {
		ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
				  fsl_chan->chan_name, fsl_chan);

		if (ret) {
			dma_pool_destroy(fsl_chan->tcd_pool);
			return ret;
		}
	}

	return 0;
}

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_engine *edma = fsl_chan->edma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	if (edma->drvdata->dmamuxs)
		fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	if (fsl_chan->txirq)
		free_irq(fsl_chan->txirq, fsl_chan);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
	fsl_chan->is_sw = false;
	fsl_chan->srcid = 0;
	fsl_chan->is_remote = false;
	if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
		clk_disable_unprepare(fsl_chan->clk);
}

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}

/*
 * On the 32-channel Vybrid/mpc577x eDMA version, register offsets differ
 * from the 64-channel ColdFire mcf5441x eDMA.
 *
 * This function sets up the register offsets for the declared version, so
 * it must be called from xxx_edma_probe() just after the eDMA "version"
 * and "membase" have been set.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);

	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);

	if (is64) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}
}

MODULE_LICENSE("GPL v2");