/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>

#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	u32			hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	unsigned long		*bitmap;
	struct device		*dev;
	spinlock_t		lock;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
		       int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto fail;

	spin_lock_init(&pool->lock);

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	/* round up so the bitmap covers every descriptor */
	bitmap_size = BITS_TO_LONGS(pool->num_desc) * sizeof(long);
	pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto fail;

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size);
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
						  GFP_KERNEL);
		pool->iomap = pool->cpumap;
		pool->hw_addr = pool->phys;
	}

	if (pool->iomap)
		return pool;
fail:
	return NULL;
}

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	if (pool->cpumap) {
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	} else {
		iounmap(pool->iomap);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}
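
/*
 * desc_phys() and desc_from_phys() below convert between a descriptor's CPU
 * virtual address inside pool->iomap and the address the DMA engine is given
 * for it, which is based at pool->hw_addr.  The hardware base need not equal
 * the CPU physical address when the descriptors live in dedicated on-chip
 * RAM (the ioremap() case above); for dma_alloc_coherent() pools the two are
 * the same.
 */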
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
	unsigned long flags;
	int index;
	int desc_start;
	int desc_end;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	if (is_rx) {
		desc_start = 0;
		desc_end = pool->num_desc/2;
	} else {
		desc_start = pool->num_desc/2;
		desc_end = pool->num_desc;
	}

	index = bitmap_find_next_zero_area(pool->bitmap,
					   desc_end, desc_start, num_desc, 0);
	if (index < desc_end) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
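
/*
 * Illustrative only: a rough sketch of how a MAC driver might bring the
 * controller up with the interface above.  The register base, sizes, the
 * rx_handler/tx_handler callbacks and the rx_chan_num()/tx_chan_num()
 * helpers are assumptions for the sake of the example, not taken from any
 * in-tree user; see struct cpdma_params in davinci_cpdma.h for the full
 * parameter list.
 *
 *	struct cpdma_params params = {
 *		.dev			= dev,
 *		.dmaregs		= regs,		// hypothetical base
 *		.num_chan		= 8,
 *		.min_packet_size	= 60,
 *		.desc_mem_size		= SZ_8K,
 *		.desc_align		= 16,
 *	};
 *
 *	ctlr = cpdma_ctlr_create(&params);
 *	txch = cpdma_chan_create(ctlr, tx_chan_num(0), tx_handler);
 *	rxch = cpdma_chan_create(ctlr, rx_chan_num(0), rx_handler);
 *	// post rx buffers with cpdma_chan_submit(), then:
 *	cpdma_ctlr_start(ctlr);
 */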
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	/* these registers exist only if has_ext_regs is set */
	if (ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);

/*
 * Note: cpdma_ctlr_stop() and cpdma_chan_destroy() take ctlr->lock
 * themselves, so it must not be held while they are called from here.
 */
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	/* the channel was allocated with devm_kzalloc(), so no plain kfree() */
	devm_kfree(ctlr->dev, chan);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	unsigned long flags;
	int index;
	bool ret;
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap,
					   pool->num_desc, pool->num_desc/2,
					   1, 0);

	if (index < pool->num_desc)
		ret = true;
	else
		ret = false;

	spin_unlock_irqrestore(&pool->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token    = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen  = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = __raw_readl(&desc->hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);
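
/*
 * Illustrative only: cpdma_chan_process() is typically driven from a NAPI
 * poll callback.  The structure layout (priv, priv->rxch, priv->ctlr) and
 * the CPDMA_EOI_RX value passed to cpdma_ctlr_eoi() are assumptions made
 * for the sake of the sketch; the EOI value is hardware/driver specific.
 *
 *	static int my_rx_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv,
 *						    napi);
 *		int done;
 *
 *		done = cpdma_chan_process(priv->rxch, budget);
 *		if (done < budget) {
 *			napi_complete(napi);
 *			cpdma_chan_int_ctrl(priv->rxch, true);
 *			cpdma_ctlr_eoi(priv->ctlr, CPDMA_EOI_RX);
 *		}
 *		return done;
 *	}
 */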
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);
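
/*
 * Illustrative only: a minimal shutdown path as a user of this API might
 * write it, mirroring the bring-up sketch earlier in this file.  Error
 * handling is omitted and the priv layout is hypothetical.
 *
 *	cpdma_ctlr_int_ctrl(priv->ctlr, false);
 *	cpdma_ctlr_stop(priv->ctlr);		// stops every channel
 *	cpdma_chan_destroy(priv->rxch);
 *	cpdma_chan_destroy(priv->txch);
 *	cpdma_ctlr_destroy(priv->ctlr);
 */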
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");