Lines Matching full:host

44 * The Cavium MMC host hardware assumes that all commands have fixed
47 * response types that are unexpected by the host hardware.
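
The comment at lines 44-47 states the driver's central quirk: the controller hard-codes a command/response type pair per opcode, and line 47 notes that some commands carry response types the controller does not expect. A minimal sketch of the usual fix for such hardware, with illustrative names only (the real driver's table and helper do not appear in these hits); the idea is to XOR the controller's assumed types against the types the MMC core actually requested:

    /*
     * Sketch only: hypothetical names, not the driver's real table.
     * The controller assumes one (command type, response type) pair
     * per opcode; XOR bits supplied with the command flip that
     * assumption into the pair the MMC core asked for.
     */
    struct cr_type {
            u8 ctype;       /* transfer type the controller will assume */
            u8 rtype;       /* response type the controller will assume */
    };

    static const struct cr_type cr_types[64];   /* indexed by opcode */

    static void get_cr_mods(u32 opcode, u8 want_ctype, u8 want_rtype,
                            u8 *ctype_xor, u8 *rtype_xor)
    {
            *ctype_xor = cr_types[opcode & 0x3f].ctype ^ want_ctype;
            *rtype_xor = cr_types[opcode & 0x3f].rtype ^ want_rtype;
    }
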
165 static void check_switch_errors(struct cvm_mmc_host *host) in check_switch_errors() argument
169 emm_switch = readq(host->base + MIO_EMM_SWITCH(host)); in check_switch_errors()
171 dev_err(host->dev, "Switch power class error\n"); in check_switch_errors()
173 dev_err(host->dev, "Switch hs timing error\n"); in check_switch_errors()
175 dev_err(host->dev, "Switch bus width error\n"); in check_switch_errors()
200 static void do_switch(struct cvm_mmc_host *host, u64 emm_switch) in do_switch() argument
212 writeq(emm_switch, host->base + MIO_EMM_SWITCH(host)); in do_switch()
215 writeq(emm_switch, host->base + MIO_EMM_SWITCH(host)); in do_switch()
219 rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host)); in do_switch()
225 check_switch_errors(host); in do_switch()
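
do_switch() (hits at 200-225) programs MIO_EMM_SWITCH and then polls MIO_EMM_RSP_STS until the controller reports the switch complete, finishing with check_switch_errors(). The two writes at 212 and 215 suggest the value is programmed twice; in the upstream driver the first write clears the bus-ID field because mode bits are only latched for bus 0, but that is stated here as an assumption since the intervening lines are not shown. A hedged reconstruction of the polling loop; the retry bound, the delay, and the SWITCH_VAL bit name (which follows the naming of the other RSP_STS bits in these hits) are assumptions:

    static void do_switch_sketch(struct cvm_mmc_host *host, u64 emm_switch)
    {
            int retries = 100;      /* assumed bound; not visible in the hits */
            u64 rsp_sts;

            writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

            /* Wait for the controller to latch the new mode. */
            do {
                    rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
                    if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
                            break;
                    udelay(10);
            } while (--retries);

            check_switch_errors(host);
    }
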
247 writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host)); in set_wdog()
252 struct cvm_mmc_host *host = slot->host; in cvm_mmc_reset_bus() local
255 emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host)); in cvm_mmc_reset_bus()
260 wdog = readq(slot->host->base + MIO_EMM_WDOG(host)); in cvm_mmc_reset_bus()
261 do_switch(slot->host, emm_switch); in cvm_mmc_reset_bus()
267 writeq(wdog, slot->host->base + MIO_EMM_WDOG(host)); in cvm_mmc_reset_bus()
273 struct cvm_mmc_host *host = slot->host; in cvm_mmc_switch_to() local
277 if (slot->bus_id == host->last_slot) in cvm_mmc_switch_to()
280 if (host->last_slot >= 0 && host->slot[host->last_slot]) { in cvm_mmc_switch_to()
281 old_slot = host->slot[host->last_slot]; in cvm_mmc_switch_to()
282 old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host)); in cvm_mmc_switch_to()
283 old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host)); in cvm_mmc_switch_to()
286 writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host)); in cvm_mmc_switch_to()
289 do_switch(host, emm_switch); in cvm_mmc_switch_to()
293 writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host)); in cvm_mmc_switch_to()
295 host->last_slot = slot->bus_id; in cvm_mmc_switch_to()
298 static void do_read(struct cvm_mmc_host *host, struct mmc_request *req, in do_read() argument
301 struct sg_mapping_iter *smi = &host->smi; in do_read()
307 writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host)); in do_read()
317 dat = readq(host->base + MIO_EMM_BUF_DAT(host)); in do_read()
340 static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req, in set_cmd_response() argument
348 rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host)); in set_cmd_response()
361 rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host)); in set_cmd_response()
373 static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data) in finish_dma_single() argument
377 dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data)); in finish_dma_single()
381 static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data) in finish_dma_sg() argument
387 fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); in finish_dma_sg()
390 dev_err(host->dev, "%u requests still pending\n", count); in finish_dma_sg()
396 writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); in finish_dma_sg()
397 dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data)); in finish_dma_sg()
401 static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data) in finish_dma() argument
403 if (host->use_sg && data->sg_len > 1) in finish_dma()
404 return finish_dma_sg(host, data); in finish_dma()
406 return finish_dma_single(host, data); in finish_dma()
424 static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts) in cleanup_dma() argument
428 emm_dma = readq(host->base + MIO_EMM_DMA(host)); in cleanup_dma()
432 writeq(emm_dma, host->base + MIO_EMM_DMA(host)); in cleanup_dma()
437 struct cvm_mmc_host *host = dev_id; in cvm_mmc_interrupt() local
442 if (host->need_irq_handler_lock) in cvm_mmc_interrupt()
443 spin_lock(&host->irq_handler_lock); in cvm_mmc_interrupt()
445 __acquire(&host->irq_handler_lock); in cvm_mmc_interrupt()
448 emm_int = readq(host->base + MIO_EMM_INT(host)); in cvm_mmc_interrupt()
449 writeq(emm_int, host->base + MIO_EMM_INT(host)); in cvm_mmc_interrupt()
452 check_switch_errors(host); in cvm_mmc_interrupt()
454 req = host->current_req; in cvm_mmc_interrupt()
458 rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host)); in cvm_mmc_interrupt()
464 if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active) in cvm_mmc_interrupt()
467 if (!host->dma_active && req->data && in cvm_mmc_interrupt()
472 do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF); in cvm_mmc_interrupt()
487 if (host->dma_active && req->data) in cvm_mmc_interrupt()
488 if (!finish_dma(host, req->data)) in cvm_mmc_interrupt()
491 set_cmd_response(host, req, rsp_sts); in cvm_mmc_interrupt()
494 cleanup_dma(host, rsp_sts); in cvm_mmc_interrupt()
496 host->current_req = NULL; in cvm_mmc_interrupt()
500 if (host->dmar_fixup_done) in cvm_mmc_interrupt()
501 host->dmar_fixup_done(host); in cvm_mmc_interrupt()
503 host->release_bus(host); in cvm_mmc_interrupt()
505 if (host->need_irq_handler_lock) in cvm_mmc_interrupt()
506 spin_unlock(&host->irq_handler_lock); in cvm_mmc_interrupt()
508 __release(&host->irq_handler_lock); in cvm_mmc_interrupt()
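
Lines 448-449 read MIO_EMM_INT and immediately write the same value back, the classic acknowledge pattern for a write-1-to-clear status register. A minimal sketch of that pattern, assuming W1C semantics (the helper name is hypothetical):

    static irqreturn_t ack_emm_int(struct cvm_mmc_host *host, u64 *emm_int)
    {
            /*
             * Assumption: MIO_EMM_INT is write-1-to-clear, so writing
             * back exactly the bits just read acknowledges those events
             * and no others, without racing newly arriving ones.
             */
            *emm_int = readq(host->base + MIO_EMM_INT(host));
            writeq(*emm_int, host->base + MIO_EMM_INT(host));
            return *emm_int ? IRQ_HANDLED : IRQ_NONE;
    }
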
516 static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data) in prepare_dma_single() argument
521 count = dma_map_sg(host->dev, data->sg, data->sg_len, in prepare_dma_single()
536 if (!host->big_dma_addr) in prepare_dma_single()
538 writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host)); in prepare_dma_single()
543 if (host->big_dma_addr) in prepare_dma_single()
544 writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host)); in prepare_dma_single()
552 static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data) in prepare_dma_sg() argument
558 count = dma_map_sg(host->dev, data->sg, data->sg_len, in prepare_dma_sg()
566 writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); in prepare_dma_sg()
573 writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host)); in prepare_dma_sg()
578 * host->big_dma_addr here. in prepare_dma_sg()
596 writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host)); in prepare_dma_sg()
611 dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data)); in prepare_dma_sg()
613 writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host)); in prepare_dma_sg()
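
prepare_dma_sg() (hits at 552-613) drives a hardware scatter-gather FIFO: FIFO_CFG is zeroed to reset it (566), each mapped segment then gets an address written to FIFO_ADR (573) and a command word written to FIFO_CMD (596), and on error BIT_ULL(16) in FIFO_CFG (613) apparently flushes the FIFO before the mapping is undone. A hedged per-segment loop consistent with those hits; build_fifo_cmd() is a hypothetical stand-in for the command-word assembly, whose field layout is not visible here:

    struct scatterlist *sg;
    int i;

    /* Reset the FIFO before queueing address/command pairs. */
    writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

    for_each_sg(data->sg, sg, count, i) {
            u64 fifo_cmd = build_fifo_cmd(sg, data);        /* hypothetical */

            writeq(sg_dma_address(sg),
                   host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));
            /* Writing the command word pushes the pair into the FIFO. */
            writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
    }
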
617 static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data) in prepare_dma() argument
619 if (host->use_sg && data->sg_len > 1) in prepare_dma()
620 return prepare_dma_sg(host, data); in prepare_dma()
622 return prepare_dma_single(host, data); in prepare_dma()
653 struct cvm_mmc_host *host = slot->host; in cvm_mmc_dma_request() local
671 WARN_ON(host->current_req); in cvm_mmc_dma_request()
672 host->current_req = mrq; in cvm_mmc_dma_request()
675 addr = prepare_dma(host, data); in cvm_mmc_dma_request()
677 dev_err(host->dev, "prepare_dma failed\n"); in cvm_mmc_dma_request()
681 host->dma_active = true; in cvm_mmc_dma_request()
682 host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE | in cvm_mmc_dma_request()
685 if (host->dmar_fixup) in cvm_mmc_dma_request()
686 host->dmar_fixup(host, mrq->cmd, data, addr); in cvm_mmc_dma_request()
694 writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host)); in cvm_mmc_dma_request()
696 writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host)); in cvm_mmc_dma_request()
697 writeq(emm_dma, host->base + MIO_EMM_DMA(host)); in cvm_mmc_dma_request()
704 host->release_bus(host); in cvm_mmc_dma_request()
707 static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq) in do_read_request() argument
709 sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len, in do_read_request()
713 static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq) in do_write_request() argument
716 struct sg_mapping_iter *smi = &host->smi; in do_write_request()
725 writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host)); in do_write_request()
742 writeq(dat, host->base + MIO_EMM_BUF_DAT(host)); in do_write_request()
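
do_write_request() (hits at 713-742) feeds the controller's data buffer by PIO: the write to MIO_EMM_BUF_IDX at 725 selects buffer 0 with the auto-increment bit set (0x10000, matching the constant used in do_read at 307), after which each writeq to MIO_EMM_BUF_DAT stores eight packed bytes. A sketch of the sg_mapping_iter loop, assuming sg_miter_start() was already called as at line 709 and that bytes are packed most-significant first:

    struct sg_mapping_iter *smi = &host->smi;
    unsigned int bytes = mrq->data->blocks * mrq->data->blksz;
    u64 dat = 0;
    int shift = 56;

    /* Select buffer 0 with auto-increment on each BUF_DAT access. */
    writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

    while (bytes) {
            /* Advance to the next scatterlist chunk when consumed. */
            if (smi->consumed >= smi->length) {
                    if (!sg_miter_next(smi))
                            break;
                    smi->consumed = 0;
            }
            /* Pack one byte per iteration (MSB-first order assumed). */
            dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
            smi->consumed++;
            bytes--;
            if (shift == 0) {
                    writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
                    dat = 0;
                    shift = 56;
            } else {
                    shift -= 8;
            }
    }
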
753 struct cvm_mmc_host *host = slot->host; in cvm_mmc_request() local
768 host->acquire_bus(host); in cvm_mmc_request()
778 WARN_ON(host->current_req); in cvm_mmc_request()
779 host->current_req = mrq; in cvm_mmc_request()
783 do_read_request(host, mrq); in cvm_mmc_request()
785 do_write_request(host, mrq); in cvm_mmc_request()
792 host->dma_active = false; in cvm_mmc_request()
793 host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR); in cvm_mmc_request()
805 writeq(0, host->base + MIO_EMM_STS_MASK(host)); in cvm_mmc_request()
808 rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host)); in cvm_mmc_request()
818 dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts); in cvm_mmc_request()
819 writeq(emm_cmd, host->base + MIO_EMM_CMD(host)); in cvm_mmc_request()
825 struct cvm_mmc_host *host = slot->host; in cvm_mmc_set_ios() local
829 host->acquire_bus(host); in cvm_mmc_set_ios()
839 if (host->global_pwr_gpiod) in cvm_mmc_set_ios()
840 host->set_shared_power(host, 0); in cvm_mmc_set_ios()
846 if (host->global_pwr_gpiod) in cvm_mmc_set_ios()
847 host->set_shared_power(host, 1); in cvm_mmc_set_ios()
877 clk_period = (host->sys_freq + clock - 1) / (2 * clock); in cvm_mmc_set_ios()
891 do_switch(host, emm_switch); in cvm_mmc_set_ios()
894 host->release_bus(host); in cvm_mmc_set_ios()
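
The divider at line 877 is a round-up division: the paired computations at 927 and 929 write the same halved divisor into two fields, consistent with the switch register holding separate high-phase and low-phase counts of input-clock cycles (hence the factor of 2), and rounding up guarantees the card clock never exceeds the requested rate. A worked example, assuming host->sys_freq = 800 MHz and a requested clock of 52 MHz:

    clk_period = (800000000 + 52000000 - 1) / (2 * 52000000) = 8

so the card runs at 800 MHz / (2 * 8) = 50 MHz, just under the 52 MHz ceiling.
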
915 struct cvm_mmc_host *host = slot->host; in cvm_mmc_init_lowlevel() local
919 host->emm_cfg |= (1ull << slot->bus_id); in cvm_mmc_init_lowlevel()
920 writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host)); in cvm_mmc_init_lowlevel()
927 (host->sys_freq / slot->clock) / 2); in cvm_mmc_init_lowlevel()
929 (host->sys_freq / slot->clock) / 2); in cvm_mmc_init_lowlevel()
933 do_switch(host, emm_switch); in cvm_mmc_init_lowlevel()
944 writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host)); in cvm_mmc_init_lowlevel()
945 writeq(1, host->base + MIO_EMM_RCA(host)); in cvm_mmc_init_lowlevel()
963 if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) { in cvm_mmc_of_parse()
1000 clock_period = 1000000000000ull / slot->host->sys_freq; in cvm_mmc_of_parse()
1009 int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host) in cvm_mmc_of_slot_probe() argument
1021 slot->host = host; in cvm_mmc_of_slot_probe()
1028 /* Set up host parameters */ in cvm_mmc_of_slot_probe()
1041 if (host->use_sg) in cvm_mmc_of_slot_probe()
1048 dma_get_max_seg_size(host->dev)); in cvm_mmc_of_slot_probe()
1059 host->acquire_bus(host); in cvm_mmc_of_slot_probe()
1060 host->slot[id] = slot; in cvm_mmc_of_slot_probe()
1063 host->release_bus(host); in cvm_mmc_of_slot_probe()
1068 slot->host->slot[id] = NULL; in cvm_mmc_of_slot_probe()
1081 slot->host->slot[slot->bus_id] = NULL; in cvm_mmc_of_slot_remove()