1 /* 2 * Renesas USB driver 3 * 4 * Copyright (C) 2011 Renesas Solutions Corp. 5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 6 * 7 * This program is distributed in the hope that it will be useful, 8 * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 * GNU General Public License for more details. 11 * 12 * You should have received a copy of the GNU General Public License 13 * along with this program; if not, write to the Free Software 14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 15 * 16 */ 17 #include <linux/delay.h> 18 #include <linux/io.h> 19 #include <linux/scatterlist.h> 20 #include "common.h" 21 #include "pipe.h" 22 23 #define usbhsf_get_cfifo(p) (&((p)->fifo_info.cfifo)) 24 #define usbhsf_is_cfifo(p, f) (usbhsf_get_cfifo(p) == f) 25 26 #define usbhsf_fifo_is_busy(f) ((f)->pipe) /* see usbhs_pipe_select_fifo */ 27 28 /* 29 * packet initialize 30 */ 31 void usbhs_pkt_init(struct usbhs_pkt *pkt) 32 { 33 INIT_LIST_HEAD(&pkt->node); 34 } 35 36 /* 37 * packet control function 38 */ 39 static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done) 40 { 41 struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe); 42 struct device *dev = usbhs_priv_to_dev(priv); 43 44 dev_err(dev, "null handler\n"); 45 46 return -EINVAL; 47 } 48 49 static const struct usbhs_pkt_handle usbhsf_null_handler = { 50 .prepare = usbhsf_null_handle, 51 .try_run = usbhsf_null_handle, 52 }; 53 54 void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt, 55 void (*done)(struct usbhs_priv *priv, 56 struct usbhs_pkt *pkt), 57 void *buf, int len, int zero, int sequence) 58 { 59 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 60 struct device *dev = usbhs_priv_to_dev(priv); 61 unsigned long flags; 62 63 if (!done) { 64 dev_err(dev, "no done function\n"); 65 return; 66 } 67 68 /******************** spin lock ********************/ 69 usbhs_lock(priv, flags); 70 
71 if (!pipe->handler) { 72 dev_err(dev, "no handler function\n"); 73 pipe->handler = &usbhsf_null_handler; 74 } 75 76 list_move_tail(&pkt->node, &pipe->list); 77 78 /* 79 * each pkt must hold own handler. 80 * because handler might be changed by its situation. 81 * dma handler -> pio handler. 82 */ 83 pkt->pipe = pipe; 84 pkt->buf = buf; 85 pkt->handler = pipe->handler; 86 pkt->length = len; 87 pkt->zero = zero; 88 pkt->actual = 0; 89 pkt->done = done; 90 pkt->sequence = sequence; 91 92 usbhs_unlock(priv, flags); 93 /******************** spin unlock ******************/ 94 } 95 96 static void __usbhsf_pkt_del(struct usbhs_pkt *pkt) 97 { 98 list_del_init(&pkt->node); 99 } 100 101 static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe) 102 { 103 return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node); 104 } 105 106 static void usbhsf_fifo_clear(struct usbhs_pipe *pipe, 107 struct usbhs_fifo *fifo); 108 static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe, 109 struct usbhs_fifo *fifo); 110 static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo, 111 struct usbhs_pkt *pkt); 112 #define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1) 113 #define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0) 114 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map); 115 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) 116 { 117 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 118 struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe); 119 unsigned long flags; 120 121 /******************** spin lock ********************/ 122 usbhs_lock(priv, flags); 123 124 usbhs_pipe_disable(pipe); 125 126 if (!pkt) 127 pkt = __usbhsf_pkt_get(pipe); 128 129 if (pkt) { 130 struct dma_chan *chan = NULL; 131 132 if (fifo) 133 chan = usbhsf_dma_chan_get(fifo, pkt); 134 if (chan) { 135 dmaengine_terminate_all(chan); 136 usbhsf_fifo_clear(pipe, fifo); 137 usbhsf_dma_unmap(pkt); 138 } 139 140 __usbhsf_pkt_del(pkt); 141 } 142 143 if 
(fifo) 144 usbhsf_fifo_unselect(pipe, fifo); 145 146 usbhs_unlock(priv, flags); 147 /******************** spin unlock ******************/ 148 149 return pkt; 150 } 151 152 enum { 153 USBHSF_PKT_PREPARE, 154 USBHSF_PKT_TRY_RUN, 155 USBHSF_PKT_DMA_DONE, 156 }; 157 158 static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type) 159 { 160 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 161 struct usbhs_pkt *pkt; 162 struct device *dev = usbhs_priv_to_dev(priv); 163 int (*func)(struct usbhs_pkt *pkt, int *is_done); 164 unsigned long flags; 165 int ret = 0; 166 int is_done = 0; 167 168 /******************** spin lock ********************/ 169 usbhs_lock(priv, flags); 170 171 pkt = __usbhsf_pkt_get(pipe); 172 if (!pkt) 173 goto __usbhs_pkt_handler_end; 174 175 switch (type) { 176 case USBHSF_PKT_PREPARE: 177 func = pkt->handler->prepare; 178 break; 179 case USBHSF_PKT_TRY_RUN: 180 func = pkt->handler->try_run; 181 break; 182 case USBHSF_PKT_DMA_DONE: 183 func = pkt->handler->dma_done; 184 break; 185 default: 186 dev_err(dev, "unknown pkt handler\n"); 187 goto __usbhs_pkt_handler_end; 188 } 189 190 if (likely(func)) 191 ret = func(pkt, &is_done); 192 193 if (is_done) 194 __usbhsf_pkt_del(pkt); 195 196 __usbhs_pkt_handler_end: 197 usbhs_unlock(priv, flags); 198 /******************** spin unlock ******************/ 199 200 if (is_done) { 201 pkt->done(priv, pkt); 202 usbhs_pkt_start(pipe); 203 } 204 205 return ret; 206 } 207 208 void usbhs_pkt_start(struct usbhs_pipe *pipe) 209 { 210 usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE); 211 } 212 213 /* 214 * irq enable/disable function 215 */ 216 #define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e) 217 #define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e) 218 #define usbhsf_irq_callback_ctrl(pipe, status, enable) \ 219 ({ \ 220 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); \ 221 struct usbhs_mod *mod = usbhs_mod_get_current(priv); \ 222 u16 status = (1 << 
usbhs_pipe_number(pipe)); \ 223 if (!mod) \ 224 return; \ 225 if (enable) \ 226 mod->status |= status; \ 227 else \ 228 mod->status &= ~status; \ 229 usbhs_irq_callback_update(priv, mod); \ 230 }) 231 232 static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable) 233 { 234 /* 235 * And DCP pipe can NOT use "ready interrupt" for "send" 236 * it should use "empty" interrupt. 237 * see 238 * "Operation" - "Interrupt Function" - "BRDY Interrupt" 239 * 240 * on the other hand, normal pipe can use "ready interrupt" for "send" 241 * even though it is single/double buffer 242 */ 243 if (usbhs_pipe_is_dcp(pipe)) 244 usbhsf_irq_empty_ctrl(pipe, enable); 245 else 246 usbhsf_irq_ready_ctrl(pipe, enable); 247 } 248 249 static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable) 250 { 251 usbhsf_irq_ready_ctrl(pipe, enable); 252 } 253 254 /* 255 * FIFO ctrl 256 */ 257 static void usbhsf_send_terminator(struct usbhs_pipe *pipe, 258 struct usbhs_fifo *fifo) 259 { 260 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 261 262 usbhs_bset(priv, fifo->ctr, BVAL, BVAL); 263 } 264 265 static int usbhsf_fifo_barrier(struct usbhs_priv *priv, 266 struct usbhs_fifo *fifo) 267 { 268 int timeout = 1024; 269 270 do { 271 /* The FIFO port is accessible */ 272 if (usbhs_read(priv, fifo->ctr) & FRDY) 273 return 0; 274 275 udelay(10); 276 } while (timeout--); 277 278 return -EBUSY; 279 } 280 281 static void usbhsf_fifo_clear(struct usbhs_pipe *pipe, 282 struct usbhs_fifo *fifo) 283 { 284 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 285 int ret = 0; 286 287 if (!usbhs_pipe_is_dcp(pipe)) { 288 /* 289 * This driver checks the pipe condition first to avoid -EBUSY 290 * from usbhsf_fifo_barrier() with about 10 msec delay in 291 * the interrupt handler if the pipe is RX direction and empty. 
292 */ 293 if (usbhs_pipe_is_dir_in(pipe)) 294 ret = usbhs_pipe_is_accessible(pipe); 295 if (!ret) 296 ret = usbhsf_fifo_barrier(priv, fifo); 297 } 298 299 /* 300 * if non-DCP pipe, this driver should set BCLR when 301 * usbhsf_fifo_barrier() returns 0. 302 */ 303 if (!ret) 304 usbhs_write(priv, fifo->ctr, BCLR); 305 } 306 307 static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv, 308 struct usbhs_fifo *fifo) 309 { 310 return usbhs_read(priv, fifo->ctr) & DTLN_MASK; 311 } 312 313 static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe, 314 struct usbhs_fifo *fifo) 315 { 316 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 317 318 usbhs_pipe_select_fifo(pipe, NULL); 319 usbhs_write(priv, fifo->sel, 0); 320 } 321 322 static int usbhsf_fifo_select(struct usbhs_pipe *pipe, 323 struct usbhs_fifo *fifo, 324 int write) 325 { 326 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 327 struct device *dev = usbhs_priv_to_dev(priv); 328 int timeout = 1024; 329 u16 mask = ((1 << 5) | 0xF); /* mask of ISEL | CURPIPE */ 330 u16 base = usbhs_pipe_number(pipe); /* CURPIPE */ 331 332 if (usbhs_pipe_is_busy(pipe) || 333 usbhsf_fifo_is_busy(fifo)) 334 return -EBUSY; 335 336 if (usbhs_pipe_is_dcp(pipe)) { 337 base |= (1 == write) << 5; /* ISEL */ 338 339 if (usbhs_mod_is_host(priv)) 340 usbhs_dcp_dir_for_host(pipe, write); 341 } 342 343 /* "base" will be used below */ 344 if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo)) 345 usbhs_write(priv, fifo->sel, base); 346 else 347 usbhs_write(priv, fifo->sel, base | MBW_32); 348 349 /* check ISEL and CURPIPE value */ 350 while (timeout--) { 351 if (base == (mask & usbhs_read(priv, fifo->sel))) { 352 usbhs_pipe_select_fifo(pipe, fifo); 353 return 0; 354 } 355 udelay(10); 356 } 357 358 dev_err(dev, "fifo select error\n"); 359 360 return -EIO; 361 } 362 363 /* 364 * DCP status stage 365 */ 366 static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done) 367 { 368 struct usbhs_pipe *pipe = pkt->pipe; 
369 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 370 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */ 371 struct device *dev = usbhs_priv_to_dev(priv); 372 int ret; 373 374 usbhs_pipe_disable(pipe); 375 376 ret = usbhsf_fifo_select(pipe, fifo, 1); 377 if (ret < 0) { 378 dev_err(dev, "%s() faile\n", __func__); 379 return ret; 380 } 381 382 usbhs_pipe_sequence_data1(pipe); /* DATA1 */ 383 384 usbhsf_fifo_clear(pipe, fifo); 385 usbhsf_send_terminator(pipe, fifo); 386 387 usbhsf_fifo_unselect(pipe, fifo); 388 389 usbhsf_tx_irq_ctrl(pipe, 1); 390 usbhs_pipe_enable(pipe); 391 392 return ret; 393 } 394 395 static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done) 396 { 397 struct usbhs_pipe *pipe = pkt->pipe; 398 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 399 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */ 400 struct device *dev = usbhs_priv_to_dev(priv); 401 int ret; 402 403 usbhs_pipe_disable(pipe); 404 405 ret = usbhsf_fifo_select(pipe, fifo, 0); 406 if (ret < 0) { 407 dev_err(dev, "%s() fail\n", __func__); 408 return ret; 409 } 410 411 usbhs_pipe_sequence_data1(pipe); /* DATA1 */ 412 usbhsf_fifo_clear(pipe, fifo); 413 414 usbhsf_fifo_unselect(pipe, fifo); 415 416 usbhsf_rx_irq_ctrl(pipe, 1); 417 usbhs_pipe_enable(pipe); 418 419 return ret; 420 421 } 422 423 static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done) 424 { 425 struct usbhs_pipe *pipe = pkt->pipe; 426 427 if (pkt->handler == &usbhs_dcp_status_stage_in_handler) 428 usbhsf_tx_irq_ctrl(pipe, 0); 429 else 430 usbhsf_rx_irq_ctrl(pipe, 0); 431 432 pkt->actual = pkt->length; 433 *is_done = 1; 434 435 return 0; 436 } 437 438 const struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = { 439 .prepare = usbhs_dcp_dir_switch_to_write, 440 .try_run = usbhs_dcp_dir_switch_done, 441 }; 442 443 const struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = { 444 .prepare = usbhs_dcp_dir_switch_to_read, 445 .try_run = 
usbhs_dcp_dir_switch_done, 446 }; 447 448 /* 449 * DCP data stage (push) 450 */ 451 static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done) 452 { 453 struct usbhs_pipe *pipe = pkt->pipe; 454 455 usbhs_pipe_sequence_data1(pipe); /* DATA1 */ 456 457 /* 458 * change handler to PIO push 459 */ 460 pkt->handler = &usbhs_fifo_pio_push_handler; 461 462 return pkt->handler->prepare(pkt, is_done); 463 } 464 465 const struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = { 466 .prepare = usbhsf_dcp_data_stage_try_push, 467 }; 468 469 /* 470 * DCP data stage (pop) 471 */ 472 static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt, 473 int *is_done) 474 { 475 struct usbhs_pipe *pipe = pkt->pipe; 476 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 477 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); 478 479 if (usbhs_pipe_is_busy(pipe)) 480 return 0; 481 482 /* 483 * prepare pop for DCP should 484 * - change DCP direction, 485 * - clear fifo 486 * - DATA1 487 */ 488 usbhs_pipe_disable(pipe); 489 490 usbhs_pipe_sequence_data1(pipe); /* DATA1 */ 491 492 usbhsf_fifo_select(pipe, fifo, 0); 493 usbhsf_fifo_clear(pipe, fifo); 494 usbhsf_fifo_unselect(pipe, fifo); 495 496 /* 497 * change handler to PIO pop 498 */ 499 pkt->handler = &usbhs_fifo_pio_pop_handler; 500 501 return pkt->handler->prepare(pkt, is_done); 502 } 503 504 const struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = { 505 .prepare = usbhsf_dcp_data_stage_prepare_pop, 506 }; 507 508 /* 509 * PIO push handler 510 */ 511 static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done) 512 { 513 struct usbhs_pipe *pipe = pkt->pipe; 514 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 515 struct device *dev = usbhs_priv_to_dev(priv); 516 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */ 517 void __iomem *addr = priv->base + fifo->port; 518 u8 *buf; 519 int maxp = usbhs_pipe_get_maxpacket(pipe); 520 int total_len; 521 int i, ret, len; 522 int is_short; 523 
524 usbhs_pipe_data_sequence(pipe, pkt->sequence); 525 pkt->sequence = -1; /* -1 sequence will be ignored */ 526 527 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length); 528 529 ret = usbhsf_fifo_select(pipe, fifo, 1); 530 if (ret < 0) 531 return 0; 532 533 ret = usbhs_pipe_is_accessible(pipe); 534 if (ret < 0) { 535 /* inaccessible pipe is not an error */ 536 ret = 0; 537 goto usbhs_fifo_write_busy; 538 } 539 540 ret = usbhsf_fifo_barrier(priv, fifo); 541 if (ret < 0) 542 goto usbhs_fifo_write_busy; 543 544 buf = pkt->buf + pkt->actual; 545 len = pkt->length - pkt->actual; 546 len = min(len, maxp); 547 total_len = len; 548 is_short = total_len < maxp; 549 550 /* 551 * FIXME 552 * 553 * 32-bit access only 554 */ 555 if (len >= 4 && !((unsigned long)buf & 0x03)) { 556 iowrite32_rep(addr, buf, len / 4); 557 len %= 4; 558 buf += total_len - len; 559 } 560 561 /* the rest operation */ 562 for (i = 0; i < len; i++) 563 iowrite8(buf[i], addr + (0x03 - (i & 0x03))); 564 565 /* 566 * variable update 567 */ 568 pkt->actual += total_len; 569 570 if (pkt->actual < pkt->length) 571 *is_done = 0; /* there are remainder data */ 572 else if (is_short) 573 *is_done = 1; /* short packet */ 574 else 575 *is_done = !pkt->zero; /* send zero packet ? */ 576 577 /* 578 * pipe/irq handling 579 */ 580 if (is_short) 581 usbhsf_send_terminator(pipe, fifo); 582 583 usbhsf_tx_irq_ctrl(pipe, !*is_done); 584 usbhs_pipe_running(pipe, !*is_done); 585 usbhs_pipe_enable(pipe); 586 587 dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n", 588 usbhs_pipe_number(pipe), 589 pkt->length, pkt->actual, *is_done, pkt->zero); 590 591 usbhsf_fifo_unselect(pipe, fifo); 592 593 return 0; 594 595 usbhs_fifo_write_busy: 596 usbhsf_fifo_unselect(pipe, fifo); 597 598 /* 599 * pipe is busy. 
600 * retry in interrupt 601 */ 602 usbhsf_tx_irq_ctrl(pipe, 1); 603 usbhs_pipe_running(pipe, 1); 604 605 return ret; 606 } 607 608 static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done) 609 { 610 if (usbhs_pipe_is_running(pkt->pipe)) 611 return 0; 612 613 return usbhsf_pio_try_push(pkt, is_done); 614 } 615 616 const struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = { 617 .prepare = usbhsf_pio_prepare_push, 618 .try_run = usbhsf_pio_try_push, 619 }; 620 621 /* 622 * PIO pop handler 623 */ 624 static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) 625 { 626 struct usbhs_pipe *pipe = pkt->pipe; 627 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 628 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); 629 630 if (usbhs_pipe_is_busy(pipe)) 631 return 0; 632 633 if (usbhs_pipe_is_running(pipe)) 634 return 0; 635 636 /* 637 * pipe enable to prepare packet receive 638 */ 639 usbhs_pipe_data_sequence(pipe, pkt->sequence); 640 pkt->sequence = -1; /* -1 sequence will be ignored */ 641 642 if (usbhs_pipe_is_dcp(pipe)) 643 usbhsf_fifo_clear(pipe, fifo); 644 645 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length); 646 usbhs_pipe_enable(pipe); 647 usbhs_pipe_running(pipe, 1); 648 usbhsf_rx_irq_ctrl(pipe, 1); 649 650 return 0; 651 } 652 653 static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done) 654 { 655 struct usbhs_pipe *pipe = pkt->pipe; 656 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 657 struct device *dev = usbhs_priv_to_dev(priv); 658 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */ 659 void __iomem *addr = priv->base + fifo->port; 660 u8 *buf; 661 u32 data = 0; 662 int maxp = usbhs_pipe_get_maxpacket(pipe); 663 int rcv_len, len; 664 int i, ret; 665 int total_len = 0; 666 667 ret = usbhsf_fifo_select(pipe, fifo, 0); 668 if (ret < 0) 669 return 0; 670 671 ret = usbhsf_fifo_barrier(priv, fifo); 672 if (ret < 0) 673 goto usbhs_fifo_read_busy; 674 675 rcv_len = usbhsf_fifo_rcv_len(priv, fifo); 676 677 buf = 
pkt->buf + pkt->actual; 678 len = pkt->length - pkt->actual; 679 len = min(len, rcv_len); 680 total_len = len; 681 682 /* 683 * update actual length first here to decide disable pipe. 684 * if this pipe keeps BUF status and all data were popped, 685 * then, next interrupt/token will be issued again 686 */ 687 pkt->actual += total_len; 688 689 if ((pkt->actual == pkt->length) || /* receive all data */ 690 (total_len < maxp)) { /* short packet */ 691 *is_done = 1; 692 usbhsf_rx_irq_ctrl(pipe, 0); 693 usbhs_pipe_running(pipe, 0); 694 /* 695 * If function mode, since this controller is possible to enter 696 * Control Write status stage at this timing, this driver 697 * should not disable the pipe. If such a case happens, this 698 * controller is not able to complete the status stage. 699 */ 700 if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe)) 701 usbhs_pipe_disable(pipe); /* disable pipe first */ 702 } 703 704 /* 705 * Buffer clear if Zero-Length packet 706 * 707 * see 708 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function" 709 */ 710 if (0 == rcv_len) { 711 pkt->zero = 1; 712 usbhsf_fifo_clear(pipe, fifo); 713 goto usbhs_fifo_read_end; 714 } 715 716 /* 717 * FIXME 718 * 719 * 32-bit access only 720 */ 721 if (len >= 4 && !((unsigned long)buf & 0x03)) { 722 ioread32_rep(addr, buf, len / 4); 723 len %= 4; 724 buf += total_len - len; 725 } 726 727 /* the rest operation */ 728 for (i = 0; i < len; i++) { 729 if (!(i & 0x03)) 730 data = ioread32(addr); 731 732 buf[i] = (data >> ((i & 0x03) * 8)) & 0xff; 733 } 734 735 usbhs_fifo_read_end: 736 dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n", 737 usbhs_pipe_number(pipe), 738 pkt->length, pkt->actual, *is_done, pkt->zero); 739 740 usbhs_fifo_read_busy: 741 usbhsf_fifo_unselect(pipe, fifo); 742 743 return ret; 744 } 745 746 const struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = { 747 .prepare = usbhsf_prepare_pop, 748 .try_run = usbhsf_pio_try_pop, 749 }; 750 751 /* 752 * DCP ctrol statge handler 753 */ 754 
/* complete a DCP control transfer and mark the packet done */
static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
{
	usbhs_dcp_control_transfer_done(pkt->pipe);

	*is_done = 1;

	return 0;
}

const struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
	.prepare = usbhsf_ctrl_stage_end,
	.try_run = usbhsf_ctrl_stage_end,
};

/*
 * DMA fifo functions
 */

/*
 * pick the DMA channel (tx or rx) matching the packet's handler;
 * NULL if the packet is not using a DMA handler.
 */
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					    struct usbhs_pkt *pkt)
{
	if (&usbhs_fifo_dma_push_handler == pkt->handler)
		return fifo->tx_chan;

	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
		return fifo->rx_chan;

	return NULL;
}

/* find a free DFIFO that has a DMA channel usable for this packet */
static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
					      struct usbhs_pkt *pkt)
{
	struct usbhs_fifo *fifo;
	int i;

	usbhs_for_each_dfifo(priv, fifo, i) {
		if (usbhsf_dma_chan_get(fifo, pkt) &&
		    !usbhsf_fifo_is_busy(fifo))
			return fifo;
	}

	return NULL;
}

#define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
#define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
/* set/clear DREQE in the fifo select register to gate DMA requests */
static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      u16 dreqe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
}

/* (un)map the packet buffer for DMA via the platform's dma_map_ctrl hook */
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);

	return info->dma_map_ctrl(chan->device->dev, pkt, map);
}

static void usbhsf_dma_complete(void *arg);
/*
 * Workqueue body that submits the prepared DMA transfer for pkt->trans
 * bytes and starts the pipe. Runs under the priv lock; bails out
 * silently if the fifo was unselected or the descriptor/submit fails
 * (the transfer then simply never starts).
 */
static void xfer_work(struct work_struct *work)
{
	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_fifo *fifo;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	struct device *dev = usbhs_priv_to_dev(priv);
	enum dma_transfer_direction dir;
	unsigned long flags;

	usbhs_lock(priv, flags);
	fifo = usbhs_pipe_to_fifo(pipe);
	if (!fifo)
		goto xfer_work_end;

	chan = usbhsf_dma_chan_get(fifo, pkt);
	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
					pkt->trans, dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto xfer_work_end;

	desc->callback		= usbhsf_dma_complete;
	desc->callback_param	= pipe;

	pkt->cookie = dmaengine_submit(desc);
	if (pkt->cookie < 0) {
		dev_err(dev, "Failed to submit dma descriptor\n");
		goto xfer_work_end;
	}

	dev_dbg(dev, " %s %d (%d/ %d)\n",
		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);

	usbhs_pipe_running(pipe, 1);
	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
	dma_async_issue_pending(chan);
	usbhsf_dma_start(pipe, fifo);
	usbhs_pipe_enable(pipe);

xfer_work_end:
	usbhs_unlock(priv, flags);
}

/*
 * DMA push handler
 */

/*
 * Try to set up a DMA push; on any obstacle (small packet, ISOC pipe,
 * bad alignment, no free DFIFO, map failure) falls back to the PIO
 * push handler.
 */
static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len = pkt->length - pkt->actual;
	int ret;
	uintptr_t align_mask;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* use PIO if packet is less than pio_dma_border or pipe is ISOC */
	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
		goto usbhsf_pio_prepare_push;

	/* check data length if this driver don't use USB-DMAC */
	if (!usbhs_get_dparam(priv, has_usb_dmac) && len & 0x7)
		goto usbhsf_pio_prepare_push;

	/* check buffer alignment */
	align_mask = usbhs_get_dparam(priv, has_usb_dmac) ?
					USBHS_USB_DMAC_XFER_SIZE - 1 : 0x7;
	if ((uintptr_t)(pkt->buf + pkt->actual) & align_mask)
		goto usbhsf_pio_prepare_push;

	/* return at this time if the pipe is running */
	if (usbhs_pipe_is_running(pipe))
		return 0;

	/* get enable DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_push;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_push;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_push_unselect;

	pkt->trans = len;

	usbhsf_tx_irq_ctrl(pipe, 0);
	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_push_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_push:
	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}

/* DMA push completed: account transferred bytes, hand remainder to PIO */
static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);

	pkt->actual += pkt->trans;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there are remainder data */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet? */

	usbhs_pipe_running(pipe, !*is_done);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	if (!*is_done) {
		/* change handler to PIO */
		pkt->handler = &usbhs_fifo_pio_push_handler;
		return pkt->handler->try_run(pkt, is_done);
	}

	return 0;
}

const struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
	.prepare = usbhsf_dma_prepare_push,
	.dma_done = usbhsf_dma_push_done,
};

/*
 * DMA pop handler
 */

static int usbhsf_dma_prepare_pop_with_rx_irq(struct usbhs_pkt *pkt,
					      int *is_done)
{
	return usbhsf_prepare_pop(pkt, is_done);
}

/*
 * USB-DMAC pop setup: enable BFRE mode so the controller signals end of
 * transfer, then schedule the DMA. Falls back to the PIO pop handler on
 * any obstacle (small packet, ISOC pipe, alignment, no DFIFO, map fail).
 */
static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
						int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* use PIO if packet is less than pio_dma_border or pipe is ISOC */
	if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
		goto usbhsf_pio_prepare_pop;

	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_pop;

	if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
		goto usbhsf_pio_prepare_pop;

	usbhs_pipe_config_change_bfre(pipe, 1);

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	/* DMA */

	/*
	 * usbhs_fifo_dma_pop_handler :: prepare
	 * enabled irq to come here.
	 * but it is no longer needed for DMA. disable it.
	 */
	usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->trans = pkt->length;

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_pop_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;
	usbhs_pipe_config_change_bfre(pipe, 0);

	return pkt->handler->prepare(pkt, is_done);
}

static int usbhsf_dma_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);

	if (usbhs_get_dparam(priv, has_usb_dmac))
		return usbhsf_dma_prepare_pop_with_usb_dmac(pkt, is_done);
	else
		return usbhsf_dma_prepare_pop_with_rx_irq(pkt, is_done);
}

/*
 * Non-USB-DMAC pop: decide per-interrupt whether the already-received
 * FIFO contents are worth a DMA transfer; otherwise run the PIO pop
 * handler immediately.
 */
static int usbhsf_dma_try_pop_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len, ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	if (usbhs_pipe_is_dcp(pipe))
		goto usbhsf_pio_prepare_pop;

	/* get enable DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_pop;

	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_pop;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop;

	/* use PIO if packet is less than pio_dma_border */
	len = usbhsf_fifo_rcv_len(priv, fifo);
	len = min(pkt->length - pkt->actual, len);
	if (len & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_pop_unselect;

	if (len < usbhs_get_dparam(priv, pio_dma_border))
		goto usbhsf_pio_prepare_pop_unselect;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	/* DMA */

	/*
	 * usbhs_fifo_dma_pop_handler :: prepare
	 * enabled irq to come here.
	 * but it is no longer needed for DMA. disable it.
	 */
	usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->trans = len;

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_pop_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->try_run(pkt, is_done);
}

static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);

	/* try_run is only reached on the rx-irq path, never with USB-DMAC */
	BUG_ON(usbhs_get_dparam(priv, has_usb_dmac));

	return usbhsf_dma_try_pop_with_rx_irq(pkt, is_done);
}

static int usbhsf_dma_pop_done_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	pkt->actual += pkt->trans;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (pkt->trans < maxp)) {		/* short packet */
		*is_done = 1;
		usbhs_pipe_running(pipe, 0);
	} else {
		/* re-enable */
		usbhs_pipe_running(pipe, 0);
		usbhsf_prepare_pop(pkt, is_done);
	}

	return 0;
}

/*
 * Compute the number of bytes actually received by a USB-DMAC transfer
 * from the dmaengine residue; if data is left in the FIFO (@dtln != 0),
 * the last partially-transferred unit is backed out and replaced with
 * the FIFO byte count.
 */
static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
					   struct dma_chan *chan, int dtln)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct dma_tx_state state;
	size_t received_size;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	dmaengine_tx_status(chan, pkt->cookie, &state);
	received_size = pkt->length - state.residue;

	if (dtln) {
		received_size -= USBHS_USB_DMAC_XFER_SIZE;
		received_size &= ~(maxp - 1);
		received_size += dtln;
	}

	return received_size;
}

static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
					     int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
	int rcv_len;

	/*
	 * Since the driver disables rx_irq in DMA mode, the interrupt handler
	 * cannot clear the BRDYSTS. So, the function clears it here because
	 * the driver may use PIO mode next time.
	 */
	usbhs_xxxsts_clear(priv, BRDYSTS, usbhs_pipe_number(pipe));

	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
	usbhsf_fifo_clear(pipe, fifo);
	pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);

	usbhsf_dma_stop(pipe, fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	/* The driver can assume the rx transaction is always "done" */
	*is_done = 1;

	return 0;
}

static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);

	if (usbhs_get_dparam(priv, has_usb_dmac))
		return usbhsf_dma_pop_done_with_usb_dmac(pkt, is_done);
	else
		return usbhsf_dma_pop_done_with_rx_irq(pkt, is_done);
}

const struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
	.prepare = usbhsf_dma_prepare_pop,
	.try_run = usbhsf_dma_try_pop,
	.dma_done = usbhsf_dma_pop_done
};

/*
 * DMA setting
 */

/* dmaengine filter: accept the slave channel unless its id is 0 */
static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
{
	struct sh_dmae_slave *slave = param;

	/*
	 * FIXME
	 *
	 * usbhs doesn't recognize id = 0 as valid DMA
	 */
	if (0 == slave->shdma_slave.slave_id)
		return false;

	chan->private = slave;

	return true;
}

static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
{
	if (fifo->tx_chan)
		dma_release_channel(fifo->tx_chan);
	if (fifo->rx_chan)
		dma_release_channel(fifo->rx_chan);

	fifo->tx_chan = NULL;
	fifo->rx_chan = NULL;
}

/* request tx/rx DMA channels from platform data (non-DT boot) */
static void usbhsf_dma_init_pdev(struct usbhs_fifo *fifo)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->tx_slave);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->rx_slave);
}

/* request the DT-described "chN" channel; failure just leaves it NULL */
static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
			       int channel)
{
	char name[16];

	/*
	 * To avoid complex handing for DnFIFOs, the driver uses each
	 * DnFIFO as TX or RX direction (not bi-direction).
	 * So, the driver uses odd channels for TX, even channels for RX.
	 */
	snprintf(name, sizeof(name), "ch%d", channel);
	if (channel & 1) {
		fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
		if (IS_ERR(fifo->tx_chan))
			fifo->tx_chan = NULL;
	} else {
		fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
		if (IS_ERR(fifo->rx_chan))
			fifo->rx_chan = NULL;
	}
}

static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
			    int channel)
{
	struct device *dev = usbhs_priv_to_dev(priv);

	if (dev->of_node)
		usbhsf_dma_init_dt(dev, fifo, channel);
	else
		usbhsf_dma_init_pdev(fifo);

	if (fifo->tx_chan || fifo->rx_chan)
		dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
			 fifo->name,
			 fifo->tx_chan ? "[TX]" : "    ",
			 fifo->rx_chan ? "[RX]" : "    ");
}

/*
 * irq functions
 */

/* BEMP irq: run try_run on every pipe flagged in bempsts */
static int usbhsf_irq_empty(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->bempsts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->bempsts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
	}

	return 0;
}

/* BRDY irq: run try_run on every pipe flagged in brdysts */
static int usbhsf_irq_ready(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->brdysts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
1352 */ 1353 usbhs_for_each_pipe_with_dcp(pipe, priv, i) { 1354 if (!(irq_state->brdysts & (1 << i))) 1355 continue; 1356 1357 ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN); 1358 if (ret < 0) 1359 dev_err(dev, "irq_ready run_error %d : %d\n", i, ret); 1360 } 1361 1362 return 0; 1363 } 1364 1365 static void usbhsf_dma_complete(void *arg) 1366 { 1367 struct usbhs_pipe *pipe = arg; 1368 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 1369 struct device *dev = usbhs_priv_to_dev(priv); 1370 int ret; 1371 1372 ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE); 1373 if (ret < 0) 1374 dev_err(dev, "dma_complete run_error %d : %d\n", 1375 usbhs_pipe_number(pipe), ret); 1376 } 1377 1378 void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe) 1379 { 1380 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 1381 struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */ 1382 1383 /* clear DCP FIFO of transmission */ 1384 if (usbhsf_fifo_select(pipe, fifo, 1) < 0) 1385 return; 1386 usbhsf_fifo_clear(pipe, fifo); 1387 usbhsf_fifo_unselect(pipe, fifo); 1388 1389 /* clear DCP FIFO of reception */ 1390 if (usbhsf_fifo_select(pipe, fifo, 0) < 0) 1391 return; 1392 usbhsf_fifo_clear(pipe, fifo); 1393 usbhsf_fifo_unselect(pipe, fifo); 1394 } 1395 1396 /* 1397 * fifo init 1398 */ 1399 void usbhs_fifo_init(struct usbhs_priv *priv) 1400 { 1401 struct usbhs_mod *mod = usbhs_mod_get_current(priv); 1402 struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv); 1403 struct usbhs_fifo *dfifo; 1404 int i; 1405 1406 mod->irq_empty = usbhsf_irq_empty; 1407 mod->irq_ready = usbhsf_irq_ready; 1408 mod->irq_bempsts = 0; 1409 mod->irq_brdysts = 0; 1410 1411 cfifo->pipe = NULL; 1412 usbhs_for_each_dfifo(priv, dfifo, i) 1413 dfifo->pipe = NULL; 1414 } 1415 1416 void usbhs_fifo_quit(struct usbhs_priv *priv) 1417 { 1418 struct usbhs_mod *mod = usbhs_mod_get_current(priv); 1419 1420 mod->irq_empty = NULL; 1421 mod->irq_ready = NULL; 1422 mod->irq_bempsts = 0; 1423 mod->irq_brdysts = 0; 1424 } 1425 1426 
/*
 * Initialize one DnFIFO: look up its descriptor, fill in the name,
 * port, SEL/CTR register offsets and the shdma slave ids (built with
 * token pasting from the channel number), then try to acquire DMA
 * channels for it.
 */
#define __USBHS_DFIFO_INIT(priv, fifo, channel, fifo_port)		\
do {									\
	fifo = usbhsf_get_dnfifo(priv, channel);			\
	fifo->name	= "D"#channel"FIFO";				\
	fifo->port	= fifo_port;					\
	fifo->sel	= D##channel##FIFOSEL;				\
	fifo->ctr	= D##channel##FIFOCTR;				\
	fifo->tx_slave.shdma_slave.slave_id =				\
			usbhs_get_dparam(priv, d##channel##_tx_id);	\
	fifo->rx_slave.shdma_slave.slave_id =				\
			usbhs_get_dparam(priv, d##channel##_rx_id);	\
	usbhsf_dma_init(priv, fifo, channel);				\
} while (0)

/* DnFIFO with a CPU-accessible port register */
#define USBHS_DFIFO_INIT(priv, fifo, channel)				\
	__USBHS_DFIFO_INIT(priv, fifo, channel, D##channel##FIFO)
/* DnFIFO without a port register (DMA-only access), port = 0 */
#define USBHS_DFIFO_INIT_NO_PORT(priv, fifo, channel)			\
	__USBHS_DFIFO_INIT(priv, fifo, channel, 0)

/*
 * Set up the CFIFO register offsets and initialize the four DnFIFOs
 * (D2/D3 have no port register). Always returns 0.
 */
int usbhs_fifo_probe(struct usbhs_priv *priv)
{
	struct usbhs_fifo *fifo;

	/* CFIFO */
	fifo = usbhsf_get_cfifo(priv);
	fifo->name	= "CFIFO";
	fifo->port	= CFIFO;
	fifo->sel	= CFIFOSEL;
	fifo->ctr	= CFIFOCTR;

	/* DFIFO */
	USBHS_DFIFO_INIT(priv, fifo, 0);
	USBHS_DFIFO_INIT(priv, fifo, 1);
	USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 2);
	USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 3);

	return 0;
}

/* Release the DMA channels acquired in usbhs_fifo_probe(). */
void usbhs_fifo_remove(struct usbhs_priv *priv)
{
	struct usbhs_fifo *fifo;
	int i;

	usbhs_for_each_dfifo(priv, fifo, i)
		usbhsf_dma_quit(priv, fifo);
}