/*
 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
 * ThunderX SOCs.
 * ...
 * Copyright (C) 2012-2017 Cavium Inc.
 */
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
/* ... */
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
/* ... */
32 "MMC Buffer",
33 "MMC Command",
34 "MMC DMA",
35 "MMC Command Error",
36 "MMC DMA Error",
37 "MMC Switch",
38 "MMC Switch Error",
39 "MMC DMA int Fifo",
40 "MMC DMA int",
/*
 * The Cavium MMC host hardware assumes that all commands have fixed
 * command and response types. These are correct if MMC devices are
 * being used. However, non-MMC devices like SD use command and
 * response types that are unexpected by the host hardware.
 *
 * The command and response types can be overridden by supplying an
 * XOR value that is applied to the type. We calculate the XOR value
 * from the values in this table and the flags passed from the MMC
 * core.
 */
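
/*
 * The table itself (cvm_mmc_cr_types) is elided from this excerpt. Its
 * entry shape, and the modifier struct returned below, are sketched here
 * as defined in cavium.h:
 */
struct cvm_mmc_cr_type {
	u8 ctype;	/* command type the hardware assumes */
	u8 rtype;	/* response type the hardware assumes */
};

struct cvm_mmc_cr_mods {
	u8 ctype_xor;	/* XOR to apply to the command type */
	u8 rtype_xor;	/* XOR to apply to the response type */
};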
static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
	struct cvm_mmc_cr_type *cr;
	u8 hardware_ctype, hardware_rtype;
	u8 desired_ctype = 0;

	cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
	hardware_ctype = cr->ctype;
	hardware_rtype = cr->rtype;
	if (cmd->opcode == MMC_GEN_CMD)
		hardware_ctype = (cmd->arg & 1) ? 1 : 2;

	/* For data (ADTC) commands the desired type depends on the direction. */
	desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
	/* ... the returned mods are the XOR of hardware and desired types. */
}
static void check_switch_errors(struct cvm_mmc_host *host)
{
	u64 emm_switch;

	emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
	if (emm_switch & MIO_EMM_SWITCH_ERR0)
		dev_err(host->dev, "Switch power class error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR1)
		dev_err(host->dev, "Switch hs timing error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR2)
		dev_err(host->dev, "Switch bus width error\n");
}
/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
	int retries = 100;
	u64 rsp_sts;
	int bus_id;

	/* Mode settings are only taken from slot 0; switch there first. */
	bus_id = get_bus_id(emm_switch);
	clear_bus_id(&emm_switch);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	set_bus_id(&emm_switch, bus_id);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	/* Wait for the switch to finish. */
	do {
		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
		if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
			break;
		udelay(10);
	} while (--retries);

	check_switch_errors(host);
}
static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
	/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
	u64 match = 0x3001070fffffffffull;

	return (slot->cached_switch & match) != (new_val & match);
}
static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
{
	u64 timeout;

	if (!slot->clock)
		return;

	if (ns)
		timeout = (slot->clock * ns) / NSEC_PER_SEC;
	else
		timeout = (slot->clock * 850ull) / 1000ull;
	writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}
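
/*
 * Worked example (illustrative numbers): at slot->clock = 52000000
 * (52 MHz), a 100 ms timeout (ns = 100000000) programs
 * 52000000 * 100000000 / 1000000000 = 5200000 clock cycles into WDOG;
 * with ns == 0 the fallback is 850/1000 of a second's worth of clocks.
 */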
static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch, wdog;

	emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
	/* ... clear the error and execute bits ... */
	set_bus_id(&emm_switch, slot->bus_id);

	wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
	do_switch(slot->host, emm_switch);

	slot->cached_switch = emm_switch;
	/* ... */
	writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}
/* Switch to another slot if needed. */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	struct cvm_mmc_slot *old_slot;
	u64 emm_sample, emm_switch;

	if (slot->bus_id == host->last_slot)
		return;

	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
		old_slot = host->slot[host->last_slot];
		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
	}

	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
	emm_switch = slot->cached_switch;
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

	host->last_slot = slot->bus_id;
}
static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
		    u64 dbuf)
{
	struct sg_mapping_iter *smi = &host->smi;
	int data_len = req->data->blocks * req->data->blksz;
	int bytes_xfered, shift = -1;
	u64 dat = 0;

	/* Auto-increment from offset zero. */
	writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		if (shift < 0) {
			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}
	}

	sg_miter_stop(smi);
	req->data->bytes_xfered = bytes_xfered;
	req->data->error = 0;
}
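
/*
 * Each 64-bit word read from BUF_DAT above is unpacked most-significant
 * byte first: shift starts at 56 and drops by 8 per byte, and a fresh
 * word is fetched only once all eight bytes have been stored.
 */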
static void do_write(struct mmc_request *req)
{
	req->data->bytes_xfered = req->data->blocks * req->data->blksz;
	req->data->error = 0;
}
static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
			     u64 rsp_sts)
{
	u64 rsp_hi, rsp_lo;

	if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
		return;

	rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

	switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
	case 1:
	case 3:
		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
		req->cmd->resp[1] = 0;
		req->cmd->resp[2] = 0;
		req->cmd->resp[3] = 0;
		break;
	case 2:
		req->cmd->resp[3] = rsp_lo & 0xffffffff;
		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
		rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
		req->cmd->resp[1] = rsp_hi & 0xffffffff;
		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
		break;
	}
}
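
/*
 * Response layout implied above: types 1 and 3 are the 48-bit response
 * formats, whose 32-bit payload sits in bits 39:8 of RSP_LO (hence the
 * >> 8); type 2 is the long 136-bit response (R2, i.e. CID/CSD), spread
 * across RSP_LO and RSP_HI and copied word-for-word into resp[3..0].
 */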
static int get_dma_dir(struct mmc_data *data)
{
	return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}
static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 fifo_cfg;
	int count;

	/* Check if there are any pending requests left. */
	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
	if (count)
		dev_err(host->dev, "%u requests still pending\n", count);

	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;

	/* Clear and disable the FIFO. */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}
static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return finish_dma_sg(host, data);
	else
		return finish_dma_single(host, data);
}
static int check_status(u64 rsp_sts)
{
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
	    rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
		return -EILSEQ;
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
		return -ETIMEDOUT;
	if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
		return -EIO;
	return 0;
}
/* Try to clean up failed DMA. */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
	u64 emm_dma;

	emm_dma = readq(host->base + MIO_EMM_DMA(host));
	emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
	set_bus_id(&emm_dma, get_bus_id(rsp_sts));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
	struct cvm_mmc_host *host = dev_id;
	struct mmc_request *req;
	u64 emm_int, rsp_sts;
	bool host_done;

	if (host->need_irq_handler_lock)
		spin_lock(&host->irq_handler_lock);
	else
		__acquire(&host->irq_handler_lock);

	/* Clear interrupt bits (write 1 clears). */
	emm_int = readq(host->base + MIO_EMM_INT(host));
	writeq(emm_int, host->base + MIO_EMM_INT(host));

	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
		check_switch_errors(host);

	req = host->current_req;
	if (!req)
		goto out;

	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	/*
	 * A set DMA_VAL means the DMA is still in progress: leave the
	 * request alone and wait for the completion interrupt.
	 */
	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
		goto out;

	if (!host->dma_active && req->data &&
	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
		unsigned int type = (rsp_sts >> 7) & 3;

		if (type == 1)
			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
		else if (type == 2)
			do_write(req);
	}

	host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
		    emm_int & MIO_EMM_INT_DMA_DONE ||
		    emm_int & MIO_EMM_INT_CMD_ERR ||
		    emm_int & MIO_EMM_INT_DMA_ERR;

	if (!(host_done && req->done))
		goto no_req_done;

	req->cmd->error = check_status(rsp_sts);

	if (host->dma_active && req->data)
		if (!finish_dma(host, req->data))
			goto no_req_done;

	set_cmd_response(host, req, rsp_sts);
	if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
	    (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
		cleanup_dma(host, rsp_sts);

	host->current_req = NULL;
	req->done(req);

no_req_done:
	if (host->dmar_fixup_done)
		host->dmar_fixup_done(host);
	if (host_done)
		host->release_bus(host);
out:
	if (host->need_irq_handler_lock)
		spin_unlock(&host->irq_handler_lock);
	else
		__release(&host->irq_handler_lock);
	return IRQ_RETVAL(emm_int != 0);
}
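
/*
 * On the locking above (inferred from this excerpt): need_irq_handler_lock
 * is set by the platform glue when the handler must serialize against
 * itself, and __acquire()/__release() are no-ops at runtime that only keep
 * sparse's lock-balance checking happy on the path without a real spinlock.
 */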
/*
 * Program DMA_CFG and, if needed, DMA_ADR.
 * Returns 0 on error, DMA address otherwise.
 */
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 dma_cfg, addr;
	int count, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;

	rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
	dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
		  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
	/* ... on little-endian kernels also set the ENDIAN field ... */
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
			      (sg_dma_len(&data->sg[0]) / 8) - 1);

	addr = sg_dma_address(&data->sg[0]);
	if (!host->big_dma_addr)
		dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	pr_debug("[%s] sg_dma_len: %u  total sg_elem: %d\n",
		 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

	if (host->big_dma_addr)
		writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
	return addr;
}
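
/*
 * The SIZE field is in units of 64-bit words, minus one: a single
 * 4096-byte segment, for example, is programmed as 4096 / 8 - 1 = 511.
 */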
/*
 * Queue the complete scatter-gather list into the FIFO.
 * Returns 0 on error, 1 otherwise.
 */
static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	struct scatterlist *sg;
	u64 fifo_cmd, addr;
	int count, i, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;
	if (count > 16)
		goto error;

	/* Enable the FIFO by clearing the CLR bit. */
	writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

	for_each_sg(data->sg, sg, count, i) {
		/* Program the DMA address; it must be 8-byte aligned. */
		addr = sg_dma_address(sg);
		if (addr & 7)
			goto error;
		writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

		/*
		 * If we have scatter-gather support we also have an extra
		 * register for the DMA addr, so no need to check
		 * host->big_dma_addr here.
		 */
		rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
		fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);
		/* ... enable the interrupt only on the last element ... */
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
				       sg_dma_len(sg) / 8 - 1);
		/* This write also increments the FIFO's COUNT field. */
		writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
	}

	/*
	 * Unlike prepare_dma_single we don't return the address here, as it
	 * would not make sense for scatter-gather. The DMA fixup is only
	 * required on models that don't support scatter-gather, so that is
	 * not a problem.
	 */
	return 1;

error:
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	/* Disable the FIFO. */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	return 0;
}
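
/*
 * Example (illustrative): a two-element list of 4096- and 512-byte
 * segments queues two FIFO entries with SIZE fields of 511 and 63;
 * only the final entry has its interrupt enabled, so a single DMA_DONE
 * fires once the whole list has completed.
 */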
static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return prepare_dma_sg(host, data);
	else
		return prepare_dma_single(host, data);
}
static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	u64 emm_dma;

	emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		  FIELD_PREP(MIO_EMM_DMA_SECTOR,
			     mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_RW,
			     (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
		  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
	set_bus_id(&emm_dma, slot->bus_id);

	if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
	    (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
		emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

	pr_debug("[%s] blocks: %u  multi: %d\n",
		 (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
		 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
	return emm_dma;
}
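
/*
 * Example (illustrative numbers): an 8-block read at block address
 * 0x1000 on a block-addressed, CMD23-capable card yields SECTOR = 1,
 * RW = 0, BLOCK_CNT = 8, CARD_ADDR = 0x1000 and MULTI = 1 in emm_dma.
 */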
static void cvm_mmc_dma_request(struct mmc_host *mmc,
				struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_data *data;
	u64 emm_dma, addr;

	if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
	    !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
		dev_err(&mmc->card->dev, "Error: %s no data\n", __func__);
		goto error;
	}

	cvm_mmc_switch_to(slot);

	data = mrq->data;
	pr_debug("DMA request  blocks: %d  block_size: %d  total_size: %d\n",
		 data->blocks, data->blksz, data->blocks * data->blksz);
	if (data->timeout_ns)
		set_wdog(slot, data->timeout_ns);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	emm_dma = prepare_ext_dma(mmc, mrq);
	addr = prepare_dma(host, data);
	if (!addr) {
		dev_err(host->dev, "prepare_dma failed\n");
		goto error;
	}

	host->dma_active = true;
	host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
			 MIO_EMM_INT_DMA_ERR);

	if (host->dmar_fixup)
		host->dmar_fixup(host, mrq->cmd, data, addr);

	/*
	 * If we have a valid SD card in the slot, we set the response
	 * bit mask to check for CRC errors and timeouts only.
	 * Otherwise, use the default power-reset value.
	 */
	if (mmc_card_sd(mmc->card))
		writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
	else
		writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
	return;

error:
	mrq->cmd->error = -EINVAL;
	if (mrq->done)
		mrq->done(mrq);
	host->release_bus(host);
}
static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
}
static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
	struct sg_mapping_iter *smi = &host->smi;
	unsigned int bytes_xfered;
	int shift = 56;
	u64 dat = 0;

	/* Copy data to the xmit buffer before issuing the command. */
	sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

	/* Auto-increment from offset zero, dbuf zero. */
	writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}

		if (shift < 0) {
			writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
			dat = 0;
		}
	}
	sg_miter_stop(smi);
}
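
/*
 * Mirror image of do_read(): bytes are packed into the 64-bit word
 * most-significant byte first (shift 56 down to 0), and the word is
 * flushed to BUF_DAT once all eight byte lanes are filled.
 */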
static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_command *cmd = mrq->cmd;
	struct cvm_mmc_cr_mods mods;
	u64 emm_cmd, rsp_sts;
	int retries = 100;

	/*
	 * Note about locking:
	 * All MMC devices share the same bus and controller. Allow only a
	 * single user of the bootbus/MMC bus at a time. The lock is acquired
	 * on all entry points from the MMC layer.
	 *
	 * For requests the lock is only released after the completion
	 * interrupt!
	 */
	host->acquire_bus(host);

	if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
	    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		return cvm_mmc_dma_request(mmc, mrq);

	cvm_mmc_switch_to(slot);

	mods = cvm_mmc_get_cr_mods(cmd);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	if (cmd->data) {
		if (cmd->data->flags & MMC_DATA_READ)
			do_read_request(host, mrq);
		else
			do_write_request(host, mrq);

		if (cmd->data->timeout_ns)
			set_wdog(slot, cmd->data->timeout_ns);
	} else
		set_wdog(slot, 0);

	host->dma_active = false;
	host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

	emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
		  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
		  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
	set_bus_id(&emm_cmd, slot->bus_id);
	if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
				64 - ((cmd->data->blocks * cmd->data->blksz) / 8));
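
	/*
	 * CMD_OFFSET counts unused 8-byte words in the 512-byte internal
	 * buffer. Example: a single 512-byte block gives 64 - 512/8 = 0
	 * (no offset), while a 64-byte transfer gives 64 - 8 = 56.
	 */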
	writeq(0, host->base + MIO_EMM_STS_MASK(host));

retry:
	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
		udelay(10);
		if (--retries)
			goto retry;
	}
	if (!retries)
		dev_err(host->dev, "Bad status: %llx before command write\n",
			rsp_sts);
	writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}
static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	int clk_period = 0, power_class = 10, bus_width = 0;
	u64 clock, emm_switch;

	host->acquire_bus(host);
	cvm_mmc_switch_to(slot);

	/* Set the power state. */
	switch (ios->power_mode) {
	case MMC_POWER_ON:
		break;

	case MMC_POWER_OFF:
		cvm_mmc_reset_bus(slot);
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 0);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;

	case MMC_POWER_UP:
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 1);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;
	}

	/* Convert bus width to the hardware encoding: 8-bit=2, 4-bit=1, 1-bit=0. */
	switch (ios->bus_width) {
	/* ... */
	}

	/* DDR is available for 4/8 bit bus width. */
	if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
		bus_width |= 4;

	/* Change the clock frequency (capped at 52 MHz). */
	clock = ios->clock;
	if (clock > 52000000)
		clock = 52000000;
	slot->clock = clock;

	if (clock)
		clk_period = (host->sys_freq + clock - 1) / (2 * clock);
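
	/*
	 * clk_period is the number of sys_freq cycles per half bus-clock
	 * period. Example (illustrative): sys_freq = 800 MHz and
	 * clock = 52 MHz gives (800000000 + 52000000 - 1) / (2 * 52000000)
	 * = 8, i.e. an actual bus clock of 800 MHz / (2 * 8) = 50 MHz.
	 */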
	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
				(ios->timing == MMC_TIMING_MMC_HS)) |
		     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
		     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
	set_bus_id(&emm_switch, slot->bus_id);

	if (!switch_val_changed(slot, emm_switch))
		goto out;

	set_wdog(slot, 0);
	do_switch(host, emm_switch);
	slot->cached_switch = emm_switch;
out:
	host->release_bus(host);
}
static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
{
	struct mmc_host *mmc = slot->mmc;

	clock = min(clock, mmc->f_max);
	clock = max(clock, mmc->f_min);
	slot->clock = clock;
}
static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch;

	/* Enable this bus slot. */
	host->emm_cfg |= (1ull << slot->bus_id);
	writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
	udelay(10);

	/* Program the initial clock speed and power. */
	cvm_mmc_set_clock(slot, slot->mmc->f_min);
	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
				 (host->sys_freq / slot->clock) / 2);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
				 (host->sys_freq / slot->clock) / 2);
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	slot->cached_switch = emm_switch;

	/*
	 * Set the watchdog timeout and the default reset value for the
	 * status mask register, and set CARD_RCA so that the card address
	 * relative to the CMD register is used for CMD7 transactions.
	 */
	set_wdog(slot, 0);
	writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(1, host->base + MIO_EMM_RCA(host));
	return 0;
}
static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
	u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
	struct device_node *node = dev->of_node;
	struct mmc_host *mmc = slot->mmc;
	u64 clock_period;
	int ret;

	ret = of_property_read_u32(node, "reg", &id);
	if (ret) {
		dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
		return ret;
	}

	if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
		dev_err(dev, "Invalid reg property on %pOF\n", node);
		return -EINVAL;
	}

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;
	/*
	 * Legacy Octeon firmware has no regulator entry; fall back to
	 * a hard-coded voltage to get a sane OCR.
	 */
	if (IS_ERR(mmc->supply.vmmc))
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Common MMC bindings */
	ret = mmc_of_parse(mmc);
	if (ret)
		return ret;

	/* Set bus width */
	if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
		of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
		if (bus_width == 8)
			mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
		else if (bus_width == 4)
			mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	/* Set maximum and minimum frequency */
	if (!mmc->f_max)
		of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
	if (!mmc->f_max || mmc->f_max > 52000000)
		mmc->f_max = 52000000;
	mmc->f_min = 400000;

	/* Sampling register settings; clock_period is in picoseconds. */
	clock_period = 1000000000000ull / slot->host->sys_freq;
	of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
	of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
	slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
	slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;

	return id;
}
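
/*
 * Example (illustrative): with sys_freq = 800 MHz the clock period is
 * 1250 ps, so a "cavium,cmd-clk-skew" of 2500 ps rounds to a cmd_cnt of
 * (2500 + 625) / 1250 = 2 sample-clock taps.
 */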
int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
	struct cvm_mmc_slot *slot;
	struct mmc_host *mmc;
	int ret, id;

	mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->mmc = mmc;
	slot->host = host;

	ret = cvm_mmc_of_parse(dev, slot);
	if (ret < 0)
		goto error;
	id = ret;

	/* Set up host parameters */
	mmc->ops = &cvm_mmc_ops;

	/*
	 * We only have a 3.3v supply, so we cannot support any of the UHS
	 * modes. We do support the high-speed DDR modes up to 52 MHz.
	 *
	 * Disable bounce buffers for max_segs = 1.
	 */
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;

	if (host->use_sg)
		mmc->max_segs = 16;
	else
		mmc->max_segs = 1;

	/* DMA size field can address up to 8 MB. */
	mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
				  dma_get_max_seg_size(host->dev));
	mmc->max_req_size = mmc->max_seg_size;
	/* External DMA is in 512 byte blocks. */
	mmc->max_blk_size = 512;
	/* DMA block count field is 15 bits. */
	mmc->max_blk_count = 32767;

	slot->clock = mmc->f_min;
	slot->bus_id = id;
	slot->cached_rca = 1;

	host->acquire_bus(host);
	host->slot[id] = slot;
	cvm_mmc_switch_to(slot);
	cvm_mmc_init_lowlevel(slot);
	host->release_bus(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(dev, "mmc_add_host() returned %d\n", ret);
		slot->host->slot[id] = NULL;
		goto error;
	}
	return 0;

error:
	mmc_free_host(slot->mmc);
	return ret;
}
int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
{
	mmc_remove_host(slot->mmc);
	slot->host->slot[slot->bus_id] = NULL;
	mmc_free_host(slot->mmc);
	return 0;
}