Lines Matching +full:mmc +full:-

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
10 #include <linux/dma-mapping.h>
16 #include <linux/mmc/host.h>
17 #include <linux/mmc/mmc.h>
18 #include <linux/mmc/sd.h>
19 #include <linux/mmc/sdio.h>
159 struct mmc_host *mmc; member
169 size_t offset; /* offset within a page, including sg->offset */
211 iowrite32(data, host->base + reg); in usdhi6_write()
212 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, in usdhi6_write()
213 host->base, reg, data); in usdhi6_write()
218 iowrite16(data, host->base + reg); in usdhi6_write16()
219 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, in usdhi6_write16()
220 host->base, reg, data); in usdhi6_write16()
225 u32 data = ioread32(host->base + reg); in usdhi6_read()
226 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, in usdhi6_read()
227 host->base, reg, data); in usdhi6_read()
233 u16 data = ioread16(host->base + reg); in usdhi6_read16()
234 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, in usdhi6_read16()
235 host->base, reg, data); in usdhi6_read16()
241 host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1; in usdhi6_irq_enable()
242 host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2; in usdhi6_irq_enable()
243 usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask); in usdhi6_irq_enable()
244 usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask); in usdhi6_irq_enable()
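
The accessor fragments above wrap ioread/iowrite with dev_vdbg tracing, and usdhi6_irq_enable() derives the two interrupt mask registers from the set of interrupts to enable. Below is a small stand-alone C sketch of that mask arithmetic only; the ALL_INFO1_IRQS/ALL_INFO2_IRQS values are placeholders, not the driver's real USDHI6_SD_INFO1_IRQ/USDHI6_SD_INFO2_IRQ definitions.

#include <stdint.h>
#include <stdio.h>

/* Placeholder masks; the real USDHI6_SD_INFO1_IRQ / USDHI6_SD_INFO2_IRQ
 * values are defined in the driver and are not reproduced here. */
#define ALL_INFO1_IRQS	0x0000001fU
#define ALL_INFO2_IRQS	0x0000837fU

/* Mirror of the usdhi6_irq_enable() arithmetic: a set bit in the MASK
 * register disables that interrupt, so enabling "info1"/"info2" means
 * clearing exactly those bits from the all-interrupts constant. */
static void irq_enable(uint32_t info1, uint32_t info2,
		       uint32_t *mask1, uint32_t *mask2)
{
	*mask1 = ALL_INFO1_IRQS & ~info1;
	*mask2 = ALL_INFO2_IRQS & ~info2;
}

int main(void)
{
	uint32_t m1, m2;

	irq_enable(0x1, 0x0, &m1, &m2);	/* enable a single INFO1 interrupt */
	printf("INFO1_MASK=0x%x INFO2_MASK=0x%x\n", m1, m2);
	return 0;
}
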
278 if (host->io_error & in usdhi6_error_code()
281 int opc = host->mrq ? host->mrq->cmd->opcode : -1; in usdhi6_error_code()
285 if (host->wait == USDHI6_WAIT_FOR_CMD) in usdhi6_error_code()
286 dev_dbg(mmc_dev(host->mmc), in usdhi6_error_code()
287 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n", in usdhi6_error_code()
288 err, rsp54, host->wait, opc); in usdhi6_error_code()
290 dev_warn(mmc_dev(host->mmc), in usdhi6_error_code()
291 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n", in usdhi6_error_code()
292 err, rsp54, host->wait, opc); in usdhi6_error_code()
293 return -ETIMEDOUT; in usdhi6_error_code()
298 dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n", in usdhi6_error_code()
299 err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1); in usdhi6_error_code()
300 if (host->io_error & USDHI6_SD_INFO2_ILA) in usdhi6_error_code()
301 return -EILSEQ; in usdhi6_error_code()
303 return -EIO; in usdhi6_error_code()
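
usdhi6_error_code() condenses the recorded I/O error bits into a Linux error number. The sketch below reproduces only the decision tree visible in the fragments (timeout bits, the illegal-access bit, everything else); the two bit masks are placeholders for the driver's USDHI6_SD_INFO2_* definitions, and the dev_dbg/dev_warn reporting is left out.

#include <errno.h>
#include <stdint.h>

/* Placeholder bits standing in for the driver's timeout and illegal-access
 * flags in SD_INFO2; the real values are not shown in this listing. */
#define ERR_TIMEOUT_BITS	0x00000016U
#define ERR_ILLEGAL_ACCESS	0x00008000U

/* Timeouts map to -ETIMEDOUT, illegal sequences to -EILSEQ, anything else
 * to -EIO, following the shape of usdhi6_error_code(). */
static int error_code(uint32_t io_error)
{
	if (io_error & ERR_TIMEOUT_BITS)
		return -ETIMEDOUT;
	if (io_error & ERR_ILLEGAL_ACCESS)
		return -EILSEQ;
	return -EIO;
}

int main(void)
{
	return error_code(ERR_ILLEGAL_ACCESS) == -EILSEQ ? 0 : 1;
}
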
306 /* Scatter-Gather management */
310 * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
317 struct mmc_data *data = host->mrq->data; in usdhi6_blk_bounce()
318 size_t blk_head = host->head_len; in usdhi6_blk_bounce()
320 dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n", in usdhi6_blk_bounce()
321 __func__, host->mrq->cmd->opcode, data->sg_len, in usdhi6_blk_bounce()
322 data->blksz, data->blocks, sg->offset); in usdhi6_blk_bounce()
324 host->head_pg.page = host->pg.page; in usdhi6_blk_bounce()
325 host->head_pg.mapped = host->pg.mapped; in usdhi6_blk_bounce()
326 host->pg.page = host->pg.page + 1; in usdhi6_blk_bounce()
327 host->pg.mapped = kmap(host->pg.page); in usdhi6_blk_bounce()
329 host->blk_page = host->bounce_buf; in usdhi6_blk_bounce()
330 host->offset = 0; in usdhi6_blk_bounce()
332 if (data->flags & MMC_DATA_READ) in usdhi6_blk_bounce()
335 memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head, in usdhi6_blk_bounce()
337 memcpy(host->bounce_buf + blk_head, host->pg.mapped, in usdhi6_blk_bounce()
338 data->blksz - blk_head); in usdhi6_blk_bounce()
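
usdhi6_blk_bounce() handles a block that straddles a page boundary by stitching its two halves into bounce_buf: blk_head bytes from the tail of the current page plus the remainder from the start of the next page. A user-space illustration of that copy, assuming a 4 KiB page and demo sizes:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096	/* assumption for the demo; the kernel macro is arch-defined */

/* Assemble one block that straddles a page boundary into a bounce buffer,
 * as the two memcpy() calls above do: 'blk_head' bytes come from the tail
 * of the previous page, the rest from the head of the next page. */
static void bounce_assemble(char *bounce, const char *prev_page,
			    const char *next_page, size_t blksz, size_t blk_head)
{
	memcpy(bounce, prev_page + PAGE_SIZE - blk_head, blk_head);
	memcpy(bounce + blk_head, next_page, blksz - blk_head);
}

int main(void)
{
	static char prev[PAGE_SIZE], next[PAGE_SIZE], bounce[512];
	size_t blksz = 512, blk_head = 100;	/* e.g. 100 bytes left on the first page */

	memset(prev + PAGE_SIZE - blk_head, 'A', blk_head);
	memset(next, 'B', blksz - blk_head);
	bounce_assemble(bounce, prev, next, blksz, blk_head);
	printf("bounce: %.1s...%.1s\n", bounce, bounce + blksz - 1);	/* A...B */
	return 0;
}
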
344 struct mmc_request *mrq = host->mrq; in usdhi6_sg_prep()
345 struct mmc_data *data = mrq->data; in usdhi6_sg_prep()
347 usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks); in usdhi6_sg_prep()
349 host->sg = data->sg; in usdhi6_sg_prep()
351 host->offset = host->sg->offset; in usdhi6_sg_prep()
357 struct mmc_data *data = host->mrq->data; in usdhi6_sg_map()
358 struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg; in usdhi6_sg_map()
359 size_t head = PAGE_SIZE - sg->offset; in usdhi6_sg_map()
360 size_t blk_head = head % data->blksz; in usdhi6_sg_map()
362 WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page); in usdhi6_sg_map()
363 if (WARN(sg_dma_len(sg) % data->blksz, in usdhi6_sg_map()
365 sg_dma_len(sg), data->blksz)) in usdhi6_sg_map()
368 host->pg.page = sg_page(sg); in usdhi6_sg_map()
369 host->pg.mapped = kmap(host->pg.page); in usdhi6_sg_map()
370 host->offset = sg->offset; in usdhi6_sg_map()
373 * Block size must be a power of 2 for multi-block transfers, in usdhi6_sg_map()
376 host->head_len = blk_head; in usdhi6_sg_map()
378 if (head < data->blksz) in usdhi6_sg_map()
385 host->blk_page = host->pg.mapped; in usdhi6_sg_map()
387 dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n", in usdhi6_sg_map()
388 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, in usdhi6_sg_map()
389 sg->offset, host->mrq->cmd->opcode, host->mrq); in usdhi6_sg_map()
391 return host->blk_page + host->offset; in usdhi6_sg_map()
397 struct mmc_data *data = host->mrq->data; in usdhi6_sg_unmap()
398 struct page *page = host->head_pg.page; in usdhi6_sg_unmap()
401 /* Previous block was cross-page boundary */ in usdhi6_sg_unmap()
402 struct scatterlist *sg = data->sg_len > 1 ? in usdhi6_sg_unmap()
403 host->sg : data->sg; in usdhi6_sg_unmap()
404 size_t blk_head = host->head_len; in usdhi6_sg_unmap()
406 if (!data->error && data->flags & MMC_DATA_READ) { in usdhi6_sg_unmap()
407 memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head, in usdhi6_sg_unmap()
408 host->bounce_buf, blk_head); in usdhi6_sg_unmap()
409 memcpy(host->pg.mapped, host->bounce_buf + blk_head, in usdhi6_sg_unmap()
410 data->blksz - blk_head); in usdhi6_sg_unmap()
416 host->head_pg.page = NULL; in usdhi6_sg_unmap()
418 if (!force && sg_dma_len(sg) + sg->offset > in usdhi6_sg_unmap()
419 (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head) in usdhi6_sg_unmap()
424 page = host->pg.page; in usdhi6_sg_unmap()
431 host->pg.page = NULL; in usdhi6_sg_unmap()
437 struct mmc_data *data = host->mrq->data; in usdhi6_sg_advance()
441 if (host->head_pg.page) { in usdhi6_sg_advance()
442 /* Finished a cross-page block, jump to the new page */ in usdhi6_sg_advance()
443 host->page_idx++; in usdhi6_sg_advance()
444 host->offset = data->blksz - host->head_len; in usdhi6_sg_advance()
445 host->blk_page = host->pg.mapped; in usdhi6_sg_advance()
448 host->offset += data->blksz; in usdhi6_sg_advance()
450 if (host->offset == PAGE_SIZE) { in usdhi6_sg_advance()
452 host->offset = 0; in usdhi6_sg_advance()
453 host->page_idx++; in usdhi6_sg_advance()
458 * Now host->blk_page + host->offset point at the end of our last block in usdhi6_sg_advance()
459 * and host->page_idx is the index of the page, in which our new block in usdhi6_sg_advance()
463 done = (host->page_idx << PAGE_SHIFT) + host->offset; in usdhi6_sg_advance()
464 total = host->sg->offset + sg_dma_len(host->sg); in usdhi6_sg_advance()
466 dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__, in usdhi6_sg_advance()
467 done, total, host->offset); in usdhi6_sg_advance()
469 if (done < total && host->offset) { in usdhi6_sg_advance()
471 if (host->offset + data->blksz > PAGE_SIZE) in usdhi6_sg_advance()
473 usdhi6_blk_bounce(host, host->sg); in usdhi6_sg_advance()
487 struct scatterlist *next = sg_next(host->sg); in usdhi6_sg_advance()
489 host->page_idx = 0; in usdhi6_sg_advance()
492 host->wait = USDHI6_WAIT_FOR_DATA_END; in usdhi6_sg_advance()
493 host->sg = next; in usdhi6_sg_advance()
495 if (WARN(next && sg_dma_len(next) % data->blksz, in usdhi6_sg_advance()
497 sg_dma_len(next), data->blksz)) in usdhi6_sg_advance()
498 data->error = -EINVAL; in usdhi6_sg_advance()
506 host->pg.page = sg_page(host->sg) + host->page_idx; in usdhi6_sg_advance()
507 host->pg.mapped = kmap(host->pg.page); in usdhi6_sg_advance()
508 host->blk_page = host->pg.mapped; in usdhi6_sg_advance()
510 dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n", in usdhi6_sg_advance()
511 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, in usdhi6_sg_advance()
512 host->mrq->cmd->opcode, host->mrq); in usdhi6_sg_advance()
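
usdhi6_sg_advance() walks the current scatterlist entry block by block, tracking the byte offset inside the kmap'ped page and the page index, and switching to the bounce path whenever the next block would cross a page boundary. The loop below models just that bookkeeping with made-up sizes (4 KiB pages, 512-byte blocks, an entry starting 256 bytes into its first page); the real function also remaps pages, handles the data-end wait state and moves on to the next sg entry.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long sg_offset = 256, sg_len = 3 * PAGE_SIZE;	/* demo values */
	unsigned long blksz = 512;
	unsigned long offset = sg_offset, page_idx = 0;
	unsigned long total = sg_offset + sg_len, done;
	unsigned long cross_page = 0;

	for (;;) {
		/* consume one block starting at (page_idx, offset) */
		if (offset + blksz > PAGE_SIZE) {
			cross_page++;			/* bounce-buffer case */
			page_idx++;
			offset = offset + blksz - PAGE_SIZE;
		} else {
			offset += blksz;
			if (offset == PAGE_SIZE) {	/* landed exactly on a new page */
				offset = 0;
				page_idx++;
			}
		}
		done = (page_idx << PAGE_SHIFT) + offset;
		if (done >= total)
			break;
	}
	printf("pages touched: %lu, cross-page blocks: %lu\n",
	       page_idx + 1, cross_page);
	return 0;
}
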
519 host->dma_active = false; in usdhi6_dma_release()
520 if (host->chan_tx) { in usdhi6_dma_release()
521 struct dma_chan *chan = host->chan_tx; in usdhi6_dma_release()
522 host->chan_tx = NULL; in usdhi6_dma_release()
525 if (host->chan_rx) { in usdhi6_dma_release()
526 struct dma_chan *chan = host->chan_rx; in usdhi6_dma_release()
527 host->chan_rx = NULL; in usdhi6_dma_release()
534 struct mmc_data *data = host->mrq->data; in usdhi6_dma_stop_unmap()
536 if (!host->dma_active) in usdhi6_dma_stop_unmap()
540 host->dma_active = false; in usdhi6_dma_stop_unmap()
542 if (data->flags & MMC_DATA_READ) in usdhi6_dma_stop_unmap()
543 dma_unmap_sg(host->chan_rx->device->dev, data->sg, in usdhi6_dma_stop_unmap()
544 data->sg_len, DMA_FROM_DEVICE); in usdhi6_dma_stop_unmap()
546 dma_unmap_sg(host->chan_tx->device->dev, data->sg, in usdhi6_dma_stop_unmap()
547 data->sg_len, DMA_TO_DEVICE); in usdhi6_dma_stop_unmap()
553 struct mmc_request *mrq = host->mrq; in usdhi6_dma_complete()
555 if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n", in usdhi6_dma_complete()
556 dev_name(mmc_dev(host->mmc)), mrq)) in usdhi6_dma_complete()
559 dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__, in usdhi6_dma_complete()
560 mrq->cmd->opcode); in usdhi6_dma_complete()
563 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); in usdhi6_dma_complete()
569 struct mmc_data *data = host->mrq->data; in usdhi6_dma_setup()
570 struct scatterlist *sg = data->sg; in usdhi6_dma_setup()
572 dma_cookie_t cookie = -EINVAL; in usdhi6_dma_setup()
584 return -EINVAL; in usdhi6_dma_setup()
587 ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir); in usdhi6_dma_setup()
589 host->dma_active = true; in usdhi6_dma_setup()
595 desc->callback = usdhi6_dma_complete; in usdhi6_dma_setup()
596 desc->callback_param = host; in usdhi6_dma_setup()
600 dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n", in usdhi6_dma_setup()
601 __func__, data->sg_len, ret, cookie, desc); in usdhi6_dma_setup()
608 dev_warn(mmc_dev(host->mmc), in usdhi6_dma_setup()
617 if (!host->chan_rx || !host->chan_tx) in usdhi6_dma_start()
618 return -ENODEV; in usdhi6_dma_start()
620 if (host->mrq->data->flags & MMC_DATA_READ) in usdhi6_dma_start()
621 return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM); in usdhi6_dma_start()
623 return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV); in usdhi6_dma_start()
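
The DMA fragments follow the standard dmaengine slave flow: map the scatterlist, prepare a slave descriptor on the rx or tx channel, hook the completion callback, submit, and finally issue the transfer from usdhi6_dma_kick(). Below is a condensed kernel-context sketch of that sequence; error reporting and the PIO fallback are trimmed, and the helper name and its parameters are illustrative, not the driver's.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/*
 * Sketch of the prepare/submit path seen in usdhi6_dma_setup(): map the
 * scatterlist for the chosen channel, build a slave descriptor, attach the
 * completion callback, submit it, and later start it with
 * dma_async_issue_pending() (done from usdhi6_dma_kick() in the driver).
 */
static int dma_setup_sketch(struct dma_chan *chan, struct scatterlist *sg,
			    unsigned int sg_len,
			    enum dma_transfer_direction dir,
			    dma_async_tx_callback done_cb, void *cb_arg)
{
	enum dma_data_direction data_dir =
		dir == DMA_DEV_TO_MEM ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int nents;

	nents = dma_map_sg(chan->device->dev, sg, sg_len, data_dir);
	if (!nents)
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(chan->device->dev, sg, sg_len, data_dir);
		return -EIO;
	}

	desc->callback = done_cb;
	desc->callback_param = cb_arg;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		dma_unmap_sg(chan->device->dev, sg, sg_len, data_dir);
		return cookie;
	}

	dma_async_issue_pending(chan);
	return 0;
}
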
628 struct mmc_data *data = host->mrq->data; in usdhi6_dma_kill()
630 dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n", in usdhi6_dma_kill()
631 __func__, data->sg_len, data->blocks, data->blksz); in usdhi6_dma_kill()
633 if (data->flags & MMC_DATA_READ) in usdhi6_dma_kill()
634 dmaengine_terminate_sync(host->chan_rx); in usdhi6_dma_kill()
636 dmaengine_terminate_sync(host->chan_tx); in usdhi6_dma_kill()
641 struct mmc_data *data = host->mrq->data; in usdhi6_dma_check_error()
643 dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n", in usdhi6_dma_check_error()
644 __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1)); in usdhi6_dma_check_error()
646 if (host->io_error) { in usdhi6_dma_check_error()
647 data->error = usdhi6_error_code(host); in usdhi6_dma_check_error()
648 data->bytes_xfered = 0; in usdhi6_dma_check_error()
651 dev_warn(mmc_dev(host->mmc), in usdhi6_dma_check_error()
652 "DMA failed: %d, falling back to PIO\n", data->error); in usdhi6_dma_check_error()
661 if (host->irq_status & USDHI6_SD_INFO1_RSP_END) in usdhi6_dma_check_error()
662 dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n"); in usdhi6_dma_check_error()
667 if (host->mrq->data->flags & MMC_DATA_READ) in usdhi6_dma_kick()
668 dma_async_issue_pending(host->chan_rx); in usdhi6_dma_kick()
670 dma_async_issue_pending(host->chan_tx); in usdhi6_dma_kick()
681 host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); in usdhi6_dma_request()
682 dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__, in usdhi6_dma_request()
683 host->chan_tx); in usdhi6_dma_request()
685 if (IS_ERR(host->chan_tx)) { in usdhi6_dma_request()
686 host->chan_tx = NULL; in usdhi6_dma_request()
694 ret = dmaengine_slave_config(host->chan_tx, &cfg); in usdhi6_dma_request()
698 host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); in usdhi6_dma_request()
699 dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__, in usdhi6_dma_request()
700 host->chan_rx); in usdhi6_dma_request()
702 if (IS_ERR(host->chan_rx)) { in usdhi6_dma_request()
703 host->chan_rx = NULL; in usdhi6_dma_request()
711 ret = dmaengine_slave_config(host->chan_rx, &cfg); in usdhi6_dma_request()
718 dma_release_channel(host->chan_rx); in usdhi6_dma_request()
719 host->chan_rx = NULL; in usdhi6_dma_request()
721 dma_release_channel(host->chan_tx); in usdhi6_dma_request()
722 host->chan_tx = NULL; in usdhi6_dma_request()
729 unsigned long rate = ios->clock; in usdhi6_clk_set()
733 for (i = 1000; i; i--) { in usdhi6_clk_set()
740 dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n"); in usdhi6_clk_set()
749 if (host->imclk <= rate) { in usdhi6_clk_set()
750 if (ios->timing != MMC_TIMING_UHS_DDR50) { in usdhi6_clk_set()
751 /* Cannot have 1-to-1 clock in DDR mode */ in usdhi6_clk_set()
752 new_rate = host->imclk; in usdhi6_clk_set()
755 new_rate = host->imclk / 2; in usdhi6_clk_set()
759 roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate)); in usdhi6_clk_set()
761 new_rate = host->imclk / div; in usdhi6_clk_set()
764 if (host->rate == new_rate) in usdhi6_clk_set()
767 host->rate = new_rate; in usdhi6_clk_set()
769 dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n", in usdhi6_clk_set()
777 if (host->imclk == rate || host->imclk == host->rate || !rate) in usdhi6_clk_set()
782 host->rate = 0; in usdhi6_clk_set()
788 if (host->imclk == rate || host->imclk == host->rate || in usdhi6_clk_set()
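
usdhi6_clk_set() derives the SD clock from the fixed module clock imclk: 1:1 when the requested rate is at least imclk (halved in DDR50 mode, which cannot run 1:1), otherwise the next power-of-two divider that keeps the output at or below the request. A user-space sketch of that selection, with div_round_up()/roundup_pow_of_two() reimplemented in plain C and a made-up 156 MHz module clock:

#include <stdbool.h>
#include <stdio.h>

/* User-space stand-ins for the kernel helpers used by usdhi6_clk_set(). */
static unsigned long div_round_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;
}

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned long pick_rate(unsigned long imclk, unsigned long rate, bool ddr50)
{
	unsigned long div;

	if (imclk <= rate)
		return ddr50 ? imclk / 2 : imclk;	/* no 1:1 clock in DDR50 mode */

	div = roundup_pow_of_two(div_round_up(imclk, rate));
	return imclk / div;
}

int main(void)
{
	printf("%lu\n", pick_rate(156000000, 50000000, false));	/* 39000000: divider 4 */
	printf("%lu\n", pick_rate(156000000, 200000000, true));	/* 78000000: DDR50 cap */
	return 0;
}
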
796 struct mmc_host *mmc = host->mmc; in usdhi6_set_power() local
798 if (!IS_ERR(mmc->supply.vmmc)) in usdhi6_set_power()
800 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, in usdhi6_set_power()
801 ios->power_mode ? ios->vdd : 0); in usdhi6_set_power()
811 for (i = 1000; i; i--) in usdhi6_reset()
815 return i ? 0 : -ETIMEDOUT; in usdhi6_reset()
818 static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) in usdhi6_set_ios() argument
820 struct usdhi6_host *host = mmc_priv(mmc); in usdhi6_set_ios()
824 dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n", in usdhi6_set_ios()
825 ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing); in usdhi6_set_ios()
827 switch (ios->power_mode) { in usdhi6_set_ios()
839 dev_err(mmc_dev(mmc), "Cannot reset the interface!\n"); in usdhi6_set_ios()
852 if (ios->bus_width == MMC_BUS_WIDTH_1) { in usdhi6_set_ios()
853 if (ios->timing == MMC_TIMING_UHS_DDR50) in usdhi6_set_ios()
854 dev_err(mmc_dev(mmc), in usdhi6_set_ios()
860 mode = ios->timing == MMC_TIMING_UHS_DDR50; in usdhi6_set_ios()
867 if (host->rate != ios->clock) in usdhi6_set_ios()
874 struct mmc_request *mrq = host->mrq; in usdhi6_timeout_set()
878 if (!mrq->data) in usdhi6_timeout_set()
879 ticks = host->rate / 1000 * mrq->cmd->busy_timeout; in usdhi6_timeout_set()
881 ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) + in usdhi6_timeout_set()
882 mrq->data->timeout_clks; in usdhi6_timeout_set()
891 val = order_base_2(ticks) - 13; in usdhi6_timeout_set()
893 dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n", in usdhi6_timeout_set()
894 mrq->data ? "data" : "cmd", ticks, host->rate); in usdhi6_timeout_set()
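
usdhi6_timeout_set() converts the request's timeout into SD-clock ticks (rate/1000 times the busy timeout in ms for a bare command, rate/1000000 times timeout_ns/1000 plus timeout_clks for data) and encodes it as a power of two. A small runnable sketch of that arithmetic with demo numbers; range clamping is not visible in the fragments and is omitted here too.

#include <stdio.h>

/* ceil(log2(n)) for n >= 1, standing in for the kernel's order_base_2(). */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

/* The fragments program the timeout as a power of two offset by 13, which
 * suggests the shortest programmable timeout is 2^13 ticks. */
static unsigned int timeout_field(unsigned long ticks)
{
	return order_base_2(ticks) - 13;
}

int main(void)
{
	unsigned long rate = 25000000;			/* demo: 25 MHz SD clock */
	unsigned long cmd_ticks = rate / 1000 * 500;	/* 500 ms cmd busy timeout */
	unsigned long data_ticks = rate / 1000000 * (100000000 / 1000);
							/* 100 ms data timeout_ns, no timeout_clks */

	printf("cmd: %lu ticks -> field %u, data: %lu ticks -> field %u\n",
	       cmd_ticks, timeout_field(cmd_ticks),
	       data_ticks, timeout_field(data_ticks));
	return 0;
}
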
903 struct mmc_request *mrq = host->mrq; in usdhi6_request_done()
904 struct mmc_data *data = mrq->data; in usdhi6_request_done()
906 if (WARN(host->pg.page || host->head_pg.page, in usdhi6_request_done()
908 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode, in usdhi6_request_done()
909 data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-', in usdhi6_request_done()
910 data ? host->offset : 0, data ? data->blocks : 0, in usdhi6_request_done()
911 data ? data->blksz : 0, data ? data->sg_len : 0)) in usdhi6_request_done()
914 if (mrq->cmd->error || in usdhi6_request_done()
915 (data && data->error) || in usdhi6_request_done()
916 (mrq->stop && mrq->stop->error)) in usdhi6_request_done()
917 dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n", in usdhi6_request_done()
918 __func__, mrq->cmd->opcode, data ? data->blocks : 0, in usdhi6_request_done()
919 data ? data->blksz : 0, in usdhi6_request_done()
920 mrq->cmd->error, in usdhi6_request_done()
921 data ? data->error : 1, in usdhi6_request_done()
922 mrq->stop ? mrq->stop->error : 1); in usdhi6_request_done()
926 host->wait = USDHI6_WAIT_FOR_REQUEST; in usdhi6_request_done()
927 host->mrq = NULL; in usdhi6_request_done()
929 mmc_request_done(host->mmc, mrq); in usdhi6_request_done()
934 struct mmc_request *mrq = host->mrq; in usdhi6_cmd_flags()
935 struct mmc_command *cmd = mrq->cmd; in usdhi6_cmd_flags()
936 u16 opc = cmd->opcode; in usdhi6_cmd_flags()
938 if (host->app_cmd) { in usdhi6_cmd_flags()
939 host->app_cmd = false; in usdhi6_cmd_flags()
943 if (mrq->data) { in usdhi6_cmd_flags()
946 if (mrq->data->flags & MMC_DATA_READ) in usdhi6_cmd_flags()
949 if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || in usdhi6_cmd_flags()
950 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || in usdhi6_cmd_flags()
951 (cmd->opcode == SD_IO_RW_EXTENDED && in usdhi6_cmd_flags()
952 mrq->data->blocks > 1)) { in usdhi6_cmd_flags()
954 if (!mrq->stop) in usdhi6_cmd_flags()
975 dev_warn(mmc_dev(host->mmc), in usdhi6_cmd_flags()
978 return -EINVAL; in usdhi6_cmd_flags()
987 struct mmc_request *mrq = host->mrq; in usdhi6_rq_start()
988 struct mmc_command *cmd = mrq->cmd; in usdhi6_rq_start()
989 struct mmc_data *data = mrq->data; in usdhi6_rq_start()
996 for (i = 1000; i; i--) { in usdhi6_rq_start()
1003 dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n"); in usdhi6_rq_start()
1004 return -EAGAIN; in usdhi6_rq_start()
1011 host->page_idx = 0; in usdhi6_rq_start()
1013 if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) { in usdhi6_rq_start()
1014 switch (data->blksz) { in usdhi6_rq_start()
1021 if (mrq->stop) in usdhi6_rq_start()
1022 ret = -EINVAL; in usdhi6_rq_start()
1025 ret = -EINVAL; in usdhi6_rq_start()
1027 } else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK || in usdhi6_rq_start()
1028 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) && in usdhi6_rq_start()
1029 data->blksz != 512) { in usdhi6_rq_start()
1030 ret = -EINVAL; in usdhi6_rq_start()
1034 dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n", in usdhi6_rq_start()
1035 __func__, data->blocks, data->blksz); in usdhi6_rq_start()
1036 return -EINVAL; in usdhi6_rq_start()
1039 if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || in usdhi6_rq_start()
1040 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || in usdhi6_rq_start()
1041 (cmd->opcode == SD_IO_RW_EXTENDED && in usdhi6_rq_start()
1042 data->blocks > 1)) in usdhi6_rq_start()
1045 usdhi6_write(host, USDHI6_SD_SIZE, data->blksz); in usdhi6_rq_start()
1047 if ((data->blksz >= USDHI6_MIN_DMA || in usdhi6_rq_start()
1048 data->blocks > 1) && in usdhi6_rq_start()
1049 (data->blksz % 4 || in usdhi6_rq_start()
1050 data->sg->offset % 4)) in usdhi6_rq_start()
1051 dev_dbg(mmc_dev(host->mmc), in usdhi6_rq_start()
1052 "Bad SG of %u: %ux%u @ %u\n", data->sg_len, in usdhi6_rq_start()
1053 data->blksz, data->blocks, data->sg->offset); in usdhi6_rq_start()
1056 use_dma = data->blksz >= USDHI6_MIN_DMA && in usdhi6_rq_start()
1057 !(data->blksz % 4) && in usdhi6_rq_start()
1063 dev_dbg(mmc_dev(host->mmc), in usdhi6_rq_start()
1065 __func__, cmd->opcode, data->blocks, data->blksz, in usdhi6_rq_start()
1066 data->sg_len, use_dma ? "DMA" : "PIO", in usdhi6_rq_start()
1067 data->flags & MMC_DATA_READ ? "read" : "write", in usdhi6_rq_start()
1068 data->sg->offset, mrq->stop ? " + stop" : ""); in usdhi6_rq_start()
1070 dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n", in usdhi6_rq_start()
1071 __func__, cmd->opcode); in usdhi6_rq_start()
1077 host->wait = USDHI6_WAIT_FOR_CMD; in usdhi6_rq_start()
1078 schedule_delayed_work(&host->timeout_work, host->timeout); in usdhi6_rq_start()
1082 data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0); in usdhi6_rq_start()
1083 usdhi6_write(host, USDHI6_SD_ARG, cmd->arg); in usdhi6_rq_start()
1091 static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq) in usdhi6_request() argument
1093 struct usdhi6_host *host = mmc_priv(mmc); in usdhi6_request()
1096 cancel_delayed_work_sync(&host->timeout_work); in usdhi6_request()
1098 host->mrq = mrq; in usdhi6_request()
1099 host->sg = NULL; in usdhi6_request()
1104 mrq->cmd->error = ret; in usdhi6_request()
1109 static int usdhi6_get_cd(struct mmc_host *mmc) in usdhi6_get_cd() argument
1111 struct usdhi6_host *host = mmc_priv(mmc); in usdhi6_get_cd()
1122 return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); in usdhi6_get_cd()
1125 static int usdhi6_get_ro(struct mmc_host *mmc) in usdhi6_get_ro() argument
1127 struct usdhi6_host *host = mmc_priv(mmc); in usdhi6_get_ro()
1132 * level status.WP RO_ACTIVE_HIGH card read-only in usdhi6_get_ro()
1138 return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); in usdhi6_get_ro()
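
Both usdhi6_get_cd() and usdhi6_get_ro() use the same "!status ^ !(caps2 & ...)" idiom: the double negation normalizes the masked SD_INFO1 bit and the capability flag to 0/1, and the XOR inverts the result exactly when the ACTIVE_HIGH capability is set. A tiny demonstration; the flag value is a placeholder, and the mapping to physical pin levels is what the driver's own comment table (partially visible above) documents.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CAP2_ACTIVE_HIGH	0x1U	/* placeholder for MMC_CAP2_{CD,RO}_ACTIVE_HIGH */

/* Normalize both operands to 0/1; the XOR term flips the status bit only
 * when the ACTIVE_HIGH capability is present. */
static bool decode(uint32_t status_bit, uint32_t caps2)
{
	return !status_bit ^ !(caps2 & CAP2_ACTIVE_HIGH);
}

int main(void)
{
	printf("bit=0 default     -> %d\n", decode(0, 0));			/* 0 */
	printf("bit=1 default     -> %d\n", decode(0x20, 0));			/* 1 */
	printf("bit=0 active-high -> %d\n", decode(0, CAP2_ACTIVE_HIGH));	/* 1 */
	printf("bit=1 active-high -> %d\n", decode(0x20, CAP2_ACTIVE_HIGH));	/* 0 */
	return 0;
}
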
1141 static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable) in usdhi6_enable_sdio_irq() argument
1143 struct usdhi6_host *host = mmc_priv(mmc); in usdhi6_enable_sdio_irq()
1145 dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? "en" : "dis"); in usdhi6_enable_sdio_irq()
1148 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ; in usdhi6_enable_sdio_irq()
1149 usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask); in usdhi6_enable_sdio_irq()
1154 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ; in usdhi6_enable_sdio_irq()
1160 if (IS_ERR(host->pins_uhs)) in usdhi6_set_pinstates()
1166 return pinctrl_select_state(host->pinctrl, in usdhi6_set_pinstates()
1167 host->pins_uhs); in usdhi6_set_pinstates()
1170 return pinctrl_select_default_state(mmc_dev(host->mmc)); in usdhi6_set_pinstates()
1174 static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) in usdhi6_sig_volt_switch() argument
1178 ret = mmc_regulator_set_vqmmc(mmc, ios); in usdhi6_sig_volt_switch()
1182 ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage); in usdhi6_sig_volt_switch()
1184 dev_warn_once(mmc_dev(mmc), in usdhi6_sig_volt_switch()
1189 static int usdhi6_card_busy(struct mmc_host *mmc) in usdhi6_card_busy() argument
1191 struct usdhi6_host *host = mmc_priv(mmc); in usdhi6_card_busy()
1212 struct mmc_command *cmd = host->mrq->stop; in usdhi6_resp_cmd12()
1213 cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10); in usdhi6_resp_cmd12()
1218 struct mmc_command *cmd = host->mrq->cmd; in usdhi6_resp_read()
1219 u32 *rsp = cmd->resp, tmp = 0; in usdhi6_resp_read()
1223 * RSP10 39-8 in usdhi6_resp_read()
1224 * RSP32 71-40 in usdhi6_resp_read()
1225 * RSP54 103-72 in usdhi6_resp_read()
1226 * RSP76 127-104 in usdhi6_resp_read()
1227 * R2-type response: in usdhi6_resp_read()
1239 if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) { in usdhi6_resp_read()
1240 dev_err(mmc_dev(host->mmc), in usdhi6_resp_read()
1241 "CMD%d: response expected but is missing!\n", cmd->opcode); in usdhi6_resp_read()
1248 rsp[3 - i] = tmp >> 24; in usdhi6_resp_read()
1250 rsp[3 - i] |= tmp << 8; in usdhi6_resp_read()
1252 else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || in usdhi6_resp_read()
1253 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) in usdhi6_resp_read()
1259 dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]); in usdhi6_resp_read()
1264 struct mmc_data *data = host->mrq->data; in usdhi6_blk_read()
1268 if (host->io_error) { in usdhi6_blk_read()
1269 data->error = usdhi6_error_code(host); in usdhi6_blk_read()
1273 if (host->pg.page) { in usdhi6_blk_read()
1274 p = host->blk_page + host->offset; in usdhi6_blk_read()
1278 data->error = -ENOMEM; in usdhi6_blk_read()
1283 for (i = 0; i < data->blksz / 4; i++, p++) in usdhi6_blk_read()
1286 rest = data->blksz % 4; in usdhi6_blk_read()
1297 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); in usdhi6_blk_read()
1298 host->wait = USDHI6_WAIT_FOR_REQUEST; in usdhi6_blk_read()
1299 return data->error; in usdhi6_blk_read()
1304 struct mmc_data *data = host->mrq->data; in usdhi6_blk_write()
1308 if (host->io_error) { in usdhi6_blk_write()
1309 data->error = usdhi6_error_code(host); in usdhi6_blk_write()
1313 if (host->pg.page) { in usdhi6_blk_write()
1314 p = host->blk_page + host->offset; in usdhi6_blk_write()
1318 data->error = -ENOMEM; in usdhi6_blk_write()
1323 for (i = 0; i < data->blksz / 4; i++, p++) in usdhi6_blk_write()
1326 rest = data->blksz % 4; in usdhi6_blk_write()
1340 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); in usdhi6_blk_write()
1341 host->wait = USDHI6_WAIT_FOR_REQUEST; in usdhi6_blk_write()
1342 return data->error; in usdhi6_blk_write()
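
usdhi6_blk_read()/usdhi6_blk_write() move each block through the 32-bit SD_BUF0 data port: blksz/4 whole words plus a tail of blksz % 4 bytes. The sketch below mimics that split in user space; fifo_write32() is a stand-in for the register write, and the padding of the final partial word is illustrative, since the driver's exact tail handling is not visible in these fragments.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simulated 32-bit data-port write standing in for
 * usdhi6_write(host, USDHI6_SD_BUF0, ...). */
static void fifo_write32(uint32_t word)
{
	printf("BUF0 <= 0x%08x\n", word);
}

/* PIO write of one block: push whole 32-bit words, then pad the final
 * partial word when blksz is not a multiple of 4. */
static void blk_write_pio(const uint8_t *blk, size_t blksz)
{
	size_t i, rest = blksz % 4;
	uint32_t word;

	for (i = 0; i + 4 <= blksz; i += 4) {
		memcpy(&word, blk + i, 4);	/* avoids unaligned access */
		fifo_write32(word);
	}
	if (rest) {
		word = 0;
		memcpy(&word, blk + i, rest);
		fifo_write32(word);
	}
}

int main(void)
{
	uint8_t blk[6] = { 1, 2, 3, 4, 5, 6 };

	blk_write_pio(blk, sizeof(blk));
	return 0;
}
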
1347 struct mmc_request *mrq = host->mrq; in usdhi6_stop_cmd()
1349 switch (mrq->cmd->opcode) { in usdhi6_stop_cmd()
1352 if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) { in usdhi6_stop_cmd()
1353 host->wait = USDHI6_WAIT_FOR_STOP; in usdhi6_stop_cmd()
1358 dev_err(mmc_dev(host->mmc), in usdhi6_stop_cmd()
1360 mrq->stop->opcode, mrq->cmd->opcode); in usdhi6_stop_cmd()
1361 mrq->stop->error = -EOPNOTSUPP; in usdhi6_stop_cmd()
1364 return -EOPNOTSUPP; in usdhi6_stop_cmd()
1369 struct mmc_request *mrq = host->mrq; in usdhi6_end_cmd()
1370 struct mmc_command *cmd = mrq->cmd; in usdhi6_end_cmd()
1372 if (host->io_error) { in usdhi6_end_cmd()
1373 cmd->error = usdhi6_error_code(host); in usdhi6_end_cmd()
1379 if (!mrq->data) in usdhi6_end_cmd()
1382 if (host->dma_active) { in usdhi6_end_cmd()
1384 if (!mrq->stop) in usdhi6_end_cmd()
1385 host->wait = USDHI6_WAIT_FOR_DMA; in usdhi6_end_cmd()
1388 } else if (mrq->data->flags & MMC_DATA_READ) { in usdhi6_end_cmd()
1389 if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || in usdhi6_end_cmd()
1390 (cmd->opcode == SD_IO_RW_EXTENDED && in usdhi6_end_cmd()
1391 mrq->data->blocks > 1)) in usdhi6_end_cmd()
1392 host->wait = USDHI6_WAIT_FOR_MREAD; in usdhi6_end_cmd()
1394 host->wait = USDHI6_WAIT_FOR_READ; in usdhi6_end_cmd()
1396 if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || in usdhi6_end_cmd()
1397 (cmd->opcode == SD_IO_RW_EXTENDED && in usdhi6_end_cmd()
1398 mrq->data->blocks > 1)) in usdhi6_end_cmd()
1399 host->wait = USDHI6_WAIT_FOR_MWRITE; in usdhi6_end_cmd()
1401 host->wait = USDHI6_WAIT_FOR_WRITE; in usdhi6_end_cmd()
1414 * cross-page, in which case for single-block IO host->page_idx == 0. in usdhi6_read_block()
1422 host->wait = USDHI6_WAIT_FOR_DATA_END; in usdhi6_read_block()
1435 return !host->mrq->data->error && in usdhi6_mread_block()
1436 (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); in usdhi6_mread_block()
1449 host->wait = USDHI6_WAIT_FOR_DATA_END; in usdhi6_write_block()
1462 return !host->mrq->data->error && in usdhi6_mwrite_block()
1463 (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); in usdhi6_mwrite_block()
1476 cancel_delayed_work_sync(&host->timeout_work); in usdhi6_sd_bh()
1478 mrq = host->mrq; in usdhi6_sd_bh()
1482 cmd = mrq->cmd; in usdhi6_sd_bh()
1483 data = mrq->data; in usdhi6_sd_bh()
1485 switch (host->wait) { in usdhi6_sd_bh()
1514 if (host->io_error) { in usdhi6_sd_bh()
1516 if (mrq->stop) in usdhi6_sd_bh()
1517 mrq->stop->error = ret; in usdhi6_sd_bh()
1519 mrq->data->error = ret; in usdhi6_sd_bh()
1520 dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret); in usdhi6_sd_bh()
1524 mrq->stop->error = 0; in usdhi6_sd_bh()
1527 if (host->io_error) { in usdhi6_sd_bh()
1528 mrq->data->error = usdhi6_error_code(host); in usdhi6_sd_bh()
1529 dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, in usdhi6_sd_bh()
1530 mrq->data->error); in usdhi6_sd_bh()
1534 cmd->error = -EFAULT; in usdhi6_sd_bh()
1535 dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); in usdhi6_sd_bh()
1541 schedule_delayed_work(&host->timeout_work, host->timeout); in usdhi6_sd_bh()
1543 if (!host->dma_active) in usdhi6_sd_bh()
1544 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); in usdhi6_sd_bh()
1548 if (!cmd->error) { in usdhi6_sd_bh()
1550 if (!data->error) { in usdhi6_sd_bh()
1551 if (host->wait != USDHI6_WAIT_FOR_STOP && in usdhi6_sd_bh()
1552 host->mrq->stop && in usdhi6_sd_bh()
1553 !host->mrq->stop->error && in usdhi6_sd_bh()
1558 schedule_delayed_work(&host->timeout_work, in usdhi6_sd_bh()
1559 host->timeout); in usdhi6_sd_bh()
1564 data->bytes_xfered = data->blocks * data->blksz; in usdhi6_sd_bh()
1567 dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n", in usdhi6_sd_bh()
1568 __func__, data->error); in usdhi6_sd_bh()
1571 } else if (cmd->opcode == MMC_APP_CMD) { in usdhi6_sd_bh()
1572 host->app_cmd = true; in usdhi6_sd_bh()
1586 status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & in usdhi6_sd()
1588 status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask; in usdhi6_sd()
1592 dev_dbg(mmc_dev(host->mmc), in usdhi6_sd()
1614 host->io_error = error; in usdhi6_sd()
1615 host->irq_status = status; in usdhi6_sd()
1619 if (host->wait != USDHI6_WAIT_FOR_CMD || in usdhi6_sd()
1621 dev_warn(mmc_dev(host->mmc), in usdhi6_sd()
1625 dev_dbg(mmc_dev(host->mmc), in usdhi6_sd()
1636 u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask; in usdhi6_sdio()
1638 dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status); in usdhi6_sdio()
1645 mmc_signal_sdio_irq(host->mmc); in usdhi6_sdio()
1653 struct mmc_host *mmc = host->mmc; in usdhi6_cd() local
1657 status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & in usdhi6_cd()
1666 if (!work_pending(&mmc->detect.work) && in usdhi6_cd()
1668 !mmc->card) || in usdhi6_cd()
1670 mmc->card))) in usdhi6_cd()
1671 mmc_detect_change(mmc, msecs_to_jiffies(100)); in usdhi6_cd()
1677 * Actually this should not be needed, if the built-in timeout works reliably in
1685 struct mmc_request *mrq = host->mrq; in usdhi6_timeout_work()
1686 struct mmc_data *data = mrq ? mrq->data : NULL; in usdhi6_timeout_work()
1689 dev_warn(mmc_dev(host->mmc), in usdhi6_timeout_work()
1691 host->dma_active ? "DMA" : "PIO", in usdhi6_timeout_work()
1692 host->wait, mrq ? mrq->cmd->opcode : -1, in usdhi6_timeout_work()
1694 usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status); in usdhi6_timeout_work()
1696 if (host->dma_active) { in usdhi6_timeout_work()
1701 switch (host->wait) { in usdhi6_timeout_work()
1703 dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); in usdhi6_timeout_work()
1708 mrq->cmd->error = -ETIMEDOUT; in usdhi6_timeout_work()
1712 mrq->stop->error = -ETIMEDOUT; in usdhi6_timeout_work()
1719 sg = host->sg ?: data->sg; in usdhi6_timeout_work()
1720 dev_dbg(mmc_dev(host->mmc), in usdhi6_timeout_work()
1722 data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx, in usdhi6_timeout_work()
1723 host->offset, data->blocks, data->blksz, data->sg_len, in usdhi6_timeout_work()
1724 sg_dma_len(sg), sg->offset); in usdhi6_timeout_work()
1729 data->error = -ETIMEDOUT; in usdhi6_timeout_work()
1746 struct device *dev = &pdev->dev; in usdhi6_probe()
1747 struct mmc_host *mmc; in usdhi6_probe() local
1754 if (!dev->of_node) in usdhi6_probe()
1755 return -ENODEV; in usdhi6_probe()
1765 mmc = devm_mmc_alloc_host(dev, sizeof(*host)); in usdhi6_probe()
1766 if (!mmc) in usdhi6_probe()
1767 return -ENOMEM; in usdhi6_probe()
1769 ret = mmc_regulator_get_supply(mmc); in usdhi6_probe()
1773 ret = mmc_of_parse(mmc); in usdhi6_probe()
1777 host = mmc_priv(mmc); in usdhi6_probe()
1778 host->mmc = mmc; in usdhi6_probe()
1779 host->wait = USDHI6_WAIT_FOR_REQUEST; in usdhi6_probe()
1780 host->timeout = msecs_to_jiffies(USDHI6_REQ_TIMEOUT_MS); in usdhi6_probe()
1783 * future improvement should instead respect the cmd->busy_timeout. in usdhi6_probe()
1785 mmc->max_busy_timeout = USDHI6_REQ_TIMEOUT_MS; in usdhi6_probe()
1787 host->pinctrl = devm_pinctrl_get(&pdev->dev); in usdhi6_probe()
1788 if (IS_ERR(host->pinctrl)) in usdhi6_probe()
1789 return PTR_ERR(host->pinctrl); in usdhi6_probe()
1791 host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs"); in usdhi6_probe()
1793 host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in usdhi6_probe()
1794 if (IS_ERR(host->base)) in usdhi6_probe()
1795 return PTR_ERR(host->base); in usdhi6_probe()
1797 host->clk = devm_clk_get(dev, NULL); in usdhi6_probe()
1798 if (IS_ERR(host->clk)) in usdhi6_probe()
1799 return PTR_ERR(host->clk); in usdhi6_probe()
1801 host->imclk = clk_get_rate(host->clk); in usdhi6_probe()
1803 ret = clk_prepare_enable(host->clk); in usdhi6_probe()
1809 ret = -EPERM; in usdhi6_probe()
1825 mmc->caps |= MMC_CAP_NEEDS_POLL; in usdhi6_probe()
1838 INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work); in usdhi6_probe()
1840 usdhi6_dma_request(host, res->start); in usdhi6_probe()
1842 mmc->ops = &usdhi6_ops; in usdhi6_probe()
1843 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | in usdhi6_probe()
1846 mmc->max_segs = 32; in usdhi6_probe()
1847 mmc->max_blk_size = 512; in usdhi6_probe()
1848 mmc->max_req_size = PAGE_SIZE * mmc->max_segs; in usdhi6_probe()
1849 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; in usdhi6_probe()
1851 * Setting .max_seg_size to 1 page would simplify our page-mapping code, in usdhi6_probe()
1855 * run-time and we fall back to PIO, we will continue getting large in usdhi6_probe()
1858 mmc->max_seg_size = mmc->max_req_size; in usdhi6_probe()
1859 if (!mmc->f_max) in usdhi6_probe()
1860 mmc->f_max = host->imclk; in usdhi6_probe()
1861 mmc->f_min = host->imclk / 512; in usdhi6_probe()
1865 ret = mmc_add_host(mmc); in usdhi6_probe()
1874 clk_disable_unprepare(host->clk); in usdhi6_probe()
1882 mmc_remove_host(host->mmc); in usdhi6_remove()
1885 cancel_delayed_work_sync(&host->timeout_work); in usdhi6_remove()
1887 clk_disable_unprepare(host->clk); in usdhi6_remove()