Lines matching "tegra210-qspi"
1 // SPDX-License-Identifier: GPL-2.0-only
9 #include <linux/dma-mapping.h>
235 return readl(tqspi->base + offset); in tegra_qspi_readl()
240 writel(value, tqspi->base + offset); in tegra_qspi_writel()
244 readl(tqspi->base + QSPI_COMMAND1); /* read back to ensure the write completed */ in tegra_qspi_writel()
271 unsigned int remain_len = t->len - tqspi->cur_pos; in tegra_qspi_calculate_curr_xfer_param()
272 unsigned int bits_per_word = t->bits_per_word; in tegra_qspi_calculate_curr_xfer_param()
274 tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8); in tegra_qspi_calculate_curr_xfer_param()
277 * Tegra QSPI controller supports packed or unpacked mode transfers. in tegra_qspi_calculate_curr_xfer_param()
284 bits_per_word == 32) && t->len > 3) { in tegra_qspi_calculate_curr_xfer_param()
285 tqspi->is_packed = true; in tegra_qspi_calculate_curr_xfer_param()
286 tqspi->words_per_32bit = 32 / bits_per_word; in tegra_qspi_calculate_curr_xfer_param()
288 tqspi->is_packed = false; in tegra_qspi_calculate_curr_xfer_param()
289 tqspi->words_per_32bit = 1; in tegra_qspi_calculate_curr_xfer_param()
292 if (tqspi->is_packed) { in tegra_qspi_calculate_curr_xfer_param()
293 max_len = min(remain_len, tqspi->max_buf_size); in tegra_qspi_calculate_curr_xfer_param()
294 tqspi->curr_dma_words = max_len / tqspi->bytes_per_word; in tegra_qspi_calculate_curr_xfer_param()
297 max_word = (remain_len - 1) / tqspi->bytes_per_word + 1; in tegra_qspi_calculate_curr_xfer_param()
298 max_word = min(max_word, tqspi->max_buf_size / 4); in tegra_qspi_calculate_curr_xfer_param()
299 tqspi->curr_dma_words = max_word; in tegra_qspi_calculate_curr_xfer_param()
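A minimal userspace sketch of the word-count math above, assuming only the arithmetic visible in tegra_qspi_calculate_curr_xfer_param(); the max_buf_size value passed in main() is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int words_for_transfer(unsigned int len, unsigned int bits_per_word,
				       unsigned int max_buf_size, bool *is_packed)
{
	unsigned int bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);

	/* Packed mode needs whole bytes per word and more than 3 bytes to move. */
	*is_packed = (bits_per_word == 8 || bits_per_word == 16 ||
		      bits_per_word == 32) && len > 3;

	if (*is_packed) {
		unsigned int max_len = len < max_buf_size ? len : max_buf_size;

		return max_len / bytes_per_word;
	}

	/* Unpacked mode: one 32-bit FIFO word per client word. */
	unsigned int max_word = (len - 1) / bytes_per_word + 1;

	return max_word < max_buf_size / 4 ? max_word : max_buf_size / 4;
}

int main(void)
{
	bool packed;

	/* 256 bytes, 8 bits per word, a 256-byte buffer: 256 words, packed */
	printf("%u words\n", words_for_transfer(256, 8, 64 * 4, &packed));
	printf("packed=%d\n", packed);
	return 0;
}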
311 u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos; in tegra_qspi_fill_tx_fifo_from_client_txbuf()
317 if (tqspi->is_packed) { in tegra_qspi_fill_tx_fifo_from_client_txbuf()
318 fifo_words_left = tx_empty_count * tqspi->words_per_32bit; in tegra_qspi_fill_tx_fifo_from_client_txbuf()
319 written_words = min(fifo_words_left, tqspi->curr_dma_words); in tegra_qspi_fill_tx_fifo_from_client_txbuf()
320 len = written_words * tqspi->bytes_per_word; in tegra_qspi_fill_tx_fifo_from_client_txbuf()
325 for (i = 0; (i < 4) && len; i++, len--) in tegra_qspi_fill_tx_fifo_from_client_txbuf()
330 tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word; in tegra_qspi_fill_tx_fifo_from_client_txbuf()
333 u8 bytes_per_word = tqspi->bytes_per_word; in tegra_qspi_fill_tx_fifo_from_client_txbuf()
335 max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count); in tegra_qspi_fill_tx_fifo_from_client_txbuf()
337 len = written_words * tqspi->bytes_per_word; in tegra_qspi_fill_tx_fifo_from_client_txbuf()
338 if (len > t->len - tqspi->cur_pos) in tegra_qspi_fill_tx_fifo_from_client_txbuf()
339 len = t->len - tqspi->cur_pos; in tegra_qspi_fill_tx_fifo_from_client_txbuf()
344 for (i = 0; len && (i < bytes_per_word); i++, len--) in tegra_qspi_fill_tx_fifo_from_client_txbuf()
349 tqspi->cur_tx_pos += write_bytes; in tegra_qspi_fill_tx_fifo_from_client_txbuf()
358 u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos; in tegra_qspi_read_rx_fifo_to_client_rxbuf()
365 if (tqspi->is_packed) { in tegra_qspi_read_rx_fifo_to_client_rxbuf()
366 len = tqspi->curr_dma_words * tqspi->bytes_per_word; in tegra_qspi_read_rx_fifo_to_client_rxbuf()
370 for (i = 0; len && (i < 4); i++, len--) in tegra_qspi_read_rx_fifo_to_client_rxbuf()
374 read_words += tqspi->curr_dma_words; in tegra_qspi_read_rx_fifo_to_client_rxbuf()
375 tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word; in tegra_qspi_read_rx_fifo_to_client_rxbuf()
377 u32 rx_mask = ((u32)1 << t->bits_per_word) - 1; in tegra_qspi_read_rx_fifo_to_client_rxbuf()
378 u8 bytes_per_word = tqspi->bytes_per_word; in tegra_qspi_read_rx_fifo_to_client_rxbuf()
382 if (len > t->len - tqspi->cur_pos) in tegra_qspi_read_rx_fifo_to_client_rxbuf()
383 len = t->len - tqspi->cur_pos; in tegra_qspi_read_rx_fifo_to_client_rxbuf()
388 for (i = 0; len && (i < bytes_per_word); i++, len--) in tegra_qspi_read_rx_fifo_to_client_rxbuf()
393 tqspi->cur_rx_pos += read_bytes; in tegra_qspi_read_rx_fifo_to_client_rxbuf()
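The unpacked paths above move one client word per 32-bit FIFO word; on receive the word is masked to bits_per_word before the bytes are copied out. A sketch of that pack/unpack step, assuming the usual LSB-first byte shifts (the shift lines themselves are elided in this excerpt) and bits_per_word < 32:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_word(const uint8_t *buf, unsigned int bytes_per_word)
{
	uint32_t x = 0;

	for (unsigned int i = 0; i < bytes_per_word; i++)
		x |= (uint32_t)buf[i] << (i * 8);	/* LSB first */
	return x;
}

static void unpack_word(uint32_t fifo_word, uint8_t *buf,
			unsigned int bits_per_word, unsigned int bytes_per_word)
{
	uint32_t rx_mask = ((uint32_t)1 << bits_per_word) - 1;
	uint32_t x = fifo_word & rx_mask;	/* drop bits beyond bits_per_word */

	for (unsigned int i = 0; i < bytes_per_word; i++)
		buf[i] = (x >> (i * 8)) & 0xff;
}

int main(void)
{
	uint8_t in[2] = { 0x34, 0x12 }, out[2];

	unpack_word(pack_word(in, 2), out, 16, 2);
	printf("0x%02x%02x\n", out[1], out[0]);	/* prints 0x1234 */
	return 0;
}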
402 dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys, in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
403 tqspi->dma_buf_size, DMA_TO_DEVICE); in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
413 if (tqspi->is_packed) { in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
414 tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word; in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
416 u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos; in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
423 consume = tqspi->curr_dma_words * tqspi->bytes_per_word; in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
424 if (consume > t->len - tqspi->cur_pos) in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
425 consume = t->len - tqspi->cur_pos; in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
427 for (count = 0; count < tqspi->curr_dma_words; count++) { in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
430 for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--) in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
432 tqspi->tx_dma_buf[count] = x; in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
435 tqspi->cur_tx_pos += write_bytes; in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
438 dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys, in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
439 tqspi->dma_buf_size, DMA_TO_DEVICE); in tegra_qspi_copy_client_txbuf_to_qspi_txbuf()
445 dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys, in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
446 tqspi->dma_buf_size, DMA_FROM_DEVICE); in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
448 if (tqspi->is_packed) { in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
449 tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word; in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
451 unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos; in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
452 u32 rx_mask = ((u32)1 << t->bits_per_word) - 1; in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
460 consume = tqspi->curr_dma_words * tqspi->bytes_per_word; in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
461 if (consume > t->len - tqspi->cur_pos) in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
462 consume = t->len - tqspi->cur_pos; in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
464 for (count = 0; count < tqspi->curr_dma_words; count++) { in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
465 u32 x = tqspi->rx_dma_buf[count] & rx_mask; in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
467 for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--) in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
471 tqspi->cur_rx_pos += read_bytes; in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
474 dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys, in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
475 tqspi->dma_buf_size, DMA_FROM_DEVICE); in tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf()
489 reinit_completion(&tqspi->tx_dma_complete); in tegra_qspi_start_tx_dma()
491 if (tqspi->is_packed) in tegra_qspi_start_tx_dma()
492 tx_dma_phys = t->tx_dma; in tegra_qspi_start_tx_dma()
494 tx_dma_phys = tqspi->tx_dma_phys; in tegra_qspi_start_tx_dma()
496 tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys, in tegra_qspi_start_tx_dma()
500 if (!tqspi->tx_dma_desc) { in tegra_qspi_start_tx_dma()
501 dev_err(tqspi->dev, "Unable to get TX descriptor\n"); in tegra_qspi_start_tx_dma()
502 return -EIO; in tegra_qspi_start_tx_dma()
505 tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete; in tegra_qspi_start_tx_dma()
506 tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete; in tegra_qspi_start_tx_dma()
507 dmaengine_submit(tqspi->tx_dma_desc); in tegra_qspi_start_tx_dma()
508 dma_async_issue_pending(tqspi->tx_dma_chan); in tegra_qspi_start_tx_dma()
517 reinit_completion(&tqspi->rx_dma_complete); in tegra_qspi_start_rx_dma()
519 if (tqspi->is_packed) in tegra_qspi_start_rx_dma()
520 rx_dma_phys = t->rx_dma; in tegra_qspi_start_rx_dma()
522 rx_dma_phys = tqspi->rx_dma_phys; in tegra_qspi_start_rx_dma()
524 tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys, in tegra_qspi_start_rx_dma()
528 if (!tqspi->rx_dma_desc) { in tegra_qspi_start_rx_dma()
529 dev_err(tqspi->dev, "Unable to get RX descriptor\n"); in tegra_qspi_start_rx_dma()
530 return -EIO; in tegra_qspi_start_rx_dma()
533 tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete; in tegra_qspi_start_rx_dma()
534 tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete; in tegra_qspi_start_rx_dma()
535 dmaengine_submit(tqspi->rx_dma_desc); in tegra_qspi_start_rx_dma()
536 dma_async_issue_pending(tqspi->rx_dma_chan); in tegra_qspi_start_rx_dma()
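The descriptor setup in both helpers follows the dmaengine convention of a bare callback plus an opaque callback_param. A runnable sketch of that idiom with a plain function pointer (no kernel APIs):

#include <stdbool.h>
#include <stdio.h>

struct completion {
	bool done;
};

struct desc {
	void (*callback)(void *param);
	void *callback_param;
};

static void complete(void *param)
{
	((struct completion *)param)->done = true;
}

int main(void)
{
	struct completion c = { .done = false };
	struct desc d = { .callback = complete, .callback_param = &c };

	d.callback(d.callback_param);	/* what the DMA engine does on completion */
	printf("done=%d\n", c.done);
	return 0;
}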
543 void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS; in tegra_qspi_flush_fifos()
574 u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos; in tegra_qspi_dma_map_xfer()
575 u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos; in tegra_qspi_dma_map_xfer()
578 len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4; in tegra_qspi_dma_map_xfer()
580 if (t->tx_buf) { in tegra_qspi_dma_map_xfer()
581 t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE); in tegra_qspi_dma_map_xfer()
582 if (dma_mapping_error(tqspi->dev, t->tx_dma)) in tegra_qspi_dma_map_xfer()
583 return -ENOMEM; in tegra_qspi_dma_map_xfer()
586 if (t->rx_buf) { in tegra_qspi_dma_map_xfer()
587 t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE); in tegra_qspi_dma_map_xfer()
588 if (dma_mapping_error(tqspi->dev, t->rx_dma)) { in tegra_qspi_dma_map_xfer()
589 dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE); in tegra_qspi_dma_map_xfer()
590 return -ENOMEM; in tegra_qspi_dma_map_xfer()
601 len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4; in tegra_qspi_dma_unmap_xfer()
603 dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE); in tegra_qspi_dma_unmap_xfer()
604 dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE); in tegra_qspi_dma_unmap_xfer()
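The mapped length above is the transfer size rounded up to a 4-byte FIFO-word multiple. A sketch of that rounding; DIV_ROUND_UP mirrors the kernel macro:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int qspi_dma_len(unsigned int curr_dma_words, unsigned int bytes_per_word)
{
	return DIV_ROUND_UP(curr_dma_words * bytes_per_word, 4) * 4;
}

int main(void)
{
	printf("%u\n", qspi_dma_len(5, 1));	/* 5 bytes  -> 8 bytes mapped */
	printf("%u\n", qspi_dma_len(8, 2));	/* 16 bytes -> 16, already aligned */
	return 0;
}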
615 if (tqspi->is_packed) { in tegra_qspi_start_dma_based_transfer()
621 val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1); in tegra_qspi_start_dma_based_transfer()
626 if (tqspi->is_packed) in tegra_qspi_start_dma_based_transfer()
627 len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4; in tegra_qspi_start_dma_based_transfer()
629 len = tqspi->curr_dma_words * 4; in tegra_qspi_start_dma_based_transfer()
645 tqspi->dma_control_reg = val; in tegra_qspi_start_dma_based_transfer()
648 if (tqspi->cur_direction & DATA_DIR_TX) { in tegra_qspi_start_dma_based_transfer()
649 dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO; in tegra_qspi_start_dma_based_transfer()
652 ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig); in tegra_qspi_start_dma_based_transfer()
654 dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); in tegra_qspi_start_dma_based_transfer()
661 dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret); in tegra_qspi_start_dma_based_transfer()
666 if (tqspi->cur_direction & DATA_DIR_RX) { in tegra_qspi_start_dma_based_transfer()
667 dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO; in tegra_qspi_start_dma_based_transfer()
670 ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig); in tegra_qspi_start_dma_based_transfer()
672 dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); in tegra_qspi_start_dma_based_transfer()
676 dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys, in tegra_qspi_start_dma_based_transfer()
677 tqspi->dma_buf_size, in tegra_qspi_start_dma_based_transfer()
682 dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret); in tegra_qspi_start_dma_based_transfer()
683 if (tqspi->cur_direction & DATA_DIR_TX) in tegra_qspi_start_dma_based_transfer()
684 dmaengine_terminate_all(tqspi->tx_dma_chan); in tegra_qspi_start_dma_based_transfer()
689 tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1); in tegra_qspi_start_dma_based_transfer()
691 tqspi->is_curr_dma_xfer = true; in tegra_qspi_start_dma_based_transfer()
692 tqspi->dma_control_reg = val; in tegra_qspi_start_dma_based_transfer()
699 static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t) in tegra_qspi_start_cpu_based_transfer()
704 if (qspi->cur_direction & DATA_DIR_TX) in tegra_qspi_start_cpu_based_transfer()
705 cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t); in tegra_qspi_start_cpu_based_transfer()
707 cur_words = qspi->curr_dma_words; in tegra_qspi_start_cpu_based_transfer()
709 val = QSPI_DMA_BLK_SET(cur_words - 1); in tegra_qspi_start_cpu_based_transfer()
710 tegra_qspi_writel(qspi, val, QSPI_DMA_BLK); in tegra_qspi_start_cpu_based_transfer()
712 tegra_qspi_unmask_irq(qspi); in tegra_qspi_start_cpu_based_transfer()
714 qspi->is_curr_dma_xfer = false; in tegra_qspi_start_cpu_based_transfer()
715 val = qspi->command1_reg; in tegra_qspi_start_cpu_based_transfer()
717 tegra_qspi_writel(qspi, val, QSPI_COMMAND1); in tegra_qspi_start_cpu_based_transfer()
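QSPI_DMA_BLK above is programmed with the word count minus one, the usual count-from-zero hardware encoding. A sketch, assuming (illustratively) that the field sits in the low bits of the register:

#include <stdio.h>

#define QSPI_DMA_BLK_SET(x)	((x) & 0xffff)	/* illustrative field width */

int main(void)
{
	unsigned int cur_words = 64;

	/* a 64-word block is written as 63 (0x3f) */
	printf("0x%x\n", QSPI_DMA_BLK_SET(cur_words - 1));
	return 0;
}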
724 if (!tqspi->soc_data->has_dma) in tegra_qspi_deinit_dma()
727 if (tqspi->tx_dma_buf) { in tegra_qspi_deinit_dma()
728 dma_free_coherent(tqspi->dev, tqspi->dma_buf_size, in tegra_qspi_deinit_dma()
729 tqspi->tx_dma_buf, tqspi->tx_dma_phys); in tegra_qspi_deinit_dma()
730 tqspi->tx_dma_buf = NULL; in tegra_qspi_deinit_dma()
733 if (tqspi->tx_dma_chan) { in tegra_qspi_deinit_dma()
734 dma_release_channel(tqspi->tx_dma_chan); in tegra_qspi_deinit_dma()
735 tqspi->tx_dma_chan = NULL; in tegra_qspi_deinit_dma()
738 if (tqspi->rx_dma_buf) { in tegra_qspi_deinit_dma()
739 dma_free_coherent(tqspi->dev, tqspi->dma_buf_size, in tegra_qspi_deinit_dma()
740 tqspi->rx_dma_buf, tqspi->rx_dma_phys); in tegra_qspi_deinit_dma()
741 tqspi->rx_dma_buf = NULL; in tegra_qspi_deinit_dma()
744 if (tqspi->rx_dma_chan) { in tegra_qspi_deinit_dma()
745 dma_release_channel(tqspi->rx_dma_chan); in tegra_qspi_deinit_dma()
746 tqspi->rx_dma_chan = NULL; in tegra_qspi_deinit_dma()
757 if (!tqspi->soc_data->has_dma) in tegra_qspi_init_dma()
760 dma_chan = dma_request_chan(tqspi->dev, "rx"); in tegra_qspi_init_dma()
766 tqspi->rx_dma_chan = dma_chan; in tegra_qspi_init_dma()
768 dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL); in tegra_qspi_init_dma()
770 err = -ENOMEM; in tegra_qspi_init_dma()
774 tqspi->rx_dma_buf = dma_buf; in tegra_qspi_init_dma()
775 tqspi->rx_dma_phys = dma_phys; in tegra_qspi_init_dma()
777 dma_chan = dma_request_chan(tqspi->dev, "tx"); in tegra_qspi_init_dma()
783 tqspi->tx_dma_chan = dma_chan; in tegra_qspi_init_dma()
785 dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL); in tegra_qspi_init_dma()
787 err = -ENOMEM; in tegra_qspi_init_dma()
791 tqspi->tx_dma_buf = dma_buf; in tegra_qspi_init_dma()
792 tqspi->tx_dma_phys = dma_phys; in tegra_qspi_init_dma()
793 tqspi->use_dma = true; in tegra_qspi_init_dma()
800 if (err != -EPROBE_DEFER) { in tegra_qspi_init_dma()
801 dev_err(tqspi->dev, "cannot use DMA: %d\n", err); in tegra_qspi_init_dma()
802 dev_err(tqspi->dev, "falling back to PIO\n"); in tegra_qspi_init_dma()
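tegra_qspi_init_dma() acquires the RX channel, RX bounce buffer, TX channel and TX bounce buffer in order, and the error labels unwind whatever was already acquired. A runnable sketch of that acquire-with-rollback idiom, using plain malloc in place of the DMA APIs:

#include <stdio.h>
#include <stdlib.h>

static int init_buffers(void **rx, void **tx, size_t size)
{
	*rx = malloc(size);
	if (!*rx)
		goto err_rx;

	*tx = malloc(size);
	if (!*tx)
		goto err_tx;

	return 0;

err_tx:
	free(*rx);		/* roll back everything acquired so far */
	*rx = NULL;
err_rx:
	return -1;
}

int main(void)
{
	void *rx, *tx;

	if (!init_buffers(&rx, &tx, 4096)) {
		puts("ok");
		free(tx);
		free(rx);
	}
	return 0;
}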
812 struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller); in tegra_qspi_setup_transfer_one()
813 struct tegra_qspi_client_data *cdata = spi->controller_data; in tegra_qspi_setup_transfer_one()
814 u32 command1, command2, speed = t->speed_hz; in tegra_qspi_setup_transfer_one()
815 u8 bits_per_word = t->bits_per_word; in tegra_qspi_setup_transfer_one()
819 if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) { in tegra_qspi_setup_transfer_one()
820 clk_set_rate(tqspi->clk, speed); in tegra_qspi_setup_transfer_one()
821 tqspi->cur_speed = speed; in tegra_qspi_setup_transfer_one()
824 tqspi->cur_pos = 0; in tegra_qspi_setup_transfer_one()
825 tqspi->cur_rx_pos = 0; in tegra_qspi_setup_transfer_one()
826 tqspi->cur_tx_pos = 0; in tegra_qspi_setup_transfer_one()
827 tqspi->curr_xfer = t; in tegra_qspi_setup_transfer_one()
832 command1 = tqspi->def_command1_reg; in tegra_qspi_setup_transfer_one()
834 command1 |= QSPI_BIT_LENGTH(bits_per_word - 1); in tegra_qspi_setup_transfer_one()
837 req_mode = spi->mode & 0x3; in tegra_qspi_setup_transfer_one()
843 if (spi->mode & SPI_CS_HIGH) in tegra_qspi_setup_transfer_one()
849 if (cdata && cdata->tx_clk_tap_delay) in tegra_qspi_setup_transfer_one()
850 tx_tap = cdata->tx_clk_tap_delay; in tegra_qspi_setup_transfer_one()
852 if (cdata && cdata->rx_clk_tap_delay) in tegra_qspi_setup_transfer_one()
853 rx_tap = cdata->rx_clk_tap_delay; in tegra_qspi_setup_transfer_one()
856 if (command2 != tqspi->def_command2_reg) in tegra_qspi_setup_transfer_one()
860 command1 = tqspi->command1_reg; in tegra_qspi_setup_transfer_one()
862 command1 |= QSPI_BIT_LENGTH(bits_per_word - 1); in tegra_qspi_setup_transfer_one()
873 struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller); in tegra_qspi_start_transfer_one()
881 if (tqspi->is_packed) in tegra_qspi_start_transfer_one()
885 tqspi->cur_direction = 0; in tegra_qspi_start_transfer_one()
888 if (t->rx_buf) { in tegra_qspi_start_transfer_one()
890 tqspi->cur_direction |= DATA_DIR_RX; in tegra_qspi_start_transfer_one()
891 bus_width = t->rx_nbits; in tegra_qspi_start_transfer_one()
894 if (t->tx_buf) { in tegra_qspi_start_transfer_one()
896 tqspi->cur_direction |= DATA_DIR_TX; in tegra_qspi_start_transfer_one()
897 bus_width = t->tx_nbits; in tegra_qspi_start_transfer_one()
909 tqspi->command1_reg = command1; in tegra_qspi_start_transfer_one()
911 tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG); in tegra_qspi_start_transfer_one()
917 if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH) in tegra_qspi_start_transfer_one()
928 struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller); in tegra_qspi_parse_cdata_dt()
930 cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL); in tegra_qspi_parse_cdata_dt()
934 device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay", in tegra_qspi_parse_cdata_dt()
935 &cdata->tx_clk_tap_delay); in tegra_qspi_parse_cdata_dt()
936 device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay", in tegra_qspi_parse_cdata_dt()
937 &cdata->rx_clk_tap_delay); in tegra_qspi_parse_cdata_dt()
944 struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller); in tegra_qspi_setup()
945 struct tegra_qspi_client_data *cdata = spi->controller_data; in tegra_qspi_setup()
950 ret = pm_runtime_resume_and_get(tqspi->dev); in tegra_qspi_setup()
952 dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret); in tegra_qspi_setup()
958 spi->controller_data = cdata; in tegra_qspi_setup()
960 spin_lock_irqsave(&tqspi->lock, flags); in tegra_qspi_setup()
963 val = tqspi->def_command1_reg; in tegra_qspi_setup()
965 if (spi->mode & SPI_CS_HIGH) in tegra_qspi_setup()
970 tqspi->def_command1_reg = val; in tegra_qspi_setup()
971 tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1); in tegra_qspi_setup()
973 spin_unlock_irqrestore(&tqspi->lock, flags); in tegra_qspi_setup()
975 pm_runtime_put(tqspi->dev); in tegra_qspi_setup()
982 dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n"); in tegra_qspi_dump_regs()
983 dev_dbg(tqspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n", in tegra_qspi_dump_regs()
986 dev_dbg(tqspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n", in tegra_qspi_dump_regs()
989 dev_dbg(tqspi->dev, "INTR_MASK: 0x%08x | MISC: 0x%08x\n", in tegra_qspi_dump_regs()
992 dev_dbg(tqspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n", in tegra_qspi_dump_regs()
999 dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg); in tegra_qspi_handle_error()
1002 if (device_reset(tqspi->dev) < 0) in tegra_qspi_handle_error()
1003 dev_warn_once(tqspi->dev, "device reset failed\n"); in tegra_qspi_handle_error()
1008 struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller); in tegra_qspi_transfer_end()
1009 int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1; in tegra_qspi_transfer_end()
1012 tqspi->command1_reg |= QSPI_CS_SW_VAL; in tegra_qspi_transfer_end()
1014 tqspi->command1_reg &= ~QSPI_CS_SW_VAL; in tegra_qspi_transfer_end()
1015 tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1); in tegra_qspi_transfer_end()
1016 tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1); in tegra_qspi_transfer_end()
1030 cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1); in tegra_qspi_cmd_config()
1049 addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1); in tegra_qspi_addr_config()
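Both helpers encode a field width in bits minus one: a 1-byte opcode is programmed as 7 and a 4-byte address as 31. A sketch with an illustrative SIZE_SET stand-in (the real QSPI_COMMAND_SIZE_SET/QSPI_ADDRESS_SIZE_SET shift and mask are not shown in this excerpt):

#include <stdio.h>

#define SIZE_SET(bits)	((bits) & 0x3f)	/* illustrative 6-bit field */

int main(void)
{
	unsigned int cmd_len = 1, addr_len = 4;	/* bytes */

	printf("cmd size field: %u\n", SIZE_SET(cmd_len * 8 - 1));	/* 7 */
	printf("addr size field: %u\n", SIZE_SET(addr_len * 8 - 1));	/* 31 */
	return 0;
}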
1059 struct spi_device *spi = msg->spi; in tegra_qspi_combined_seq_xfer()
1069 if (spi->mode & SPI_TPM_HW_FLOW) { in tegra_qspi_combined_seq_xfer()
1070 if (tqspi->soc_data->supports_tpm) in tegra_qspi_combined_seq_xfer()
1073 return -EIO; in tegra_qspi_combined_seq_xfer()
1078 list_for_each_entry(xfer, &msg->transfers, transfer_list) { in tegra_qspi_combined_seq_xfer()
1083 cmd_config = tegra_qspi_cmd_config(false, 0, xfer->len); in tegra_qspi_combined_seq_xfer()
1084 cmd_value = *((const u8 *)(xfer->tx_buf)); in tegra_qspi_combined_seq_xfer()
1089 address_config = tegra_qspi_addr_config(false, 0, xfer->len); in tegra_qspi_combined_seq_xfer()
1090 address_value = *((const u32 *)(xfer->tx_buf)); in tegra_qspi_combined_seq_xfer()
1103 reinit_completion(&tqspi->xfer_completion); in tegra_qspi_combined_seq_xfer()
1110 dev_err(tqspi->dev, "Failed to start transfer-one: %d\n", ret); in tegra_qspi_combined_seq_xfer()
1117 ret = wait_for_completion_timeout(&tqspi->xfer_completion, QSPI_DMA_TIMEOUT); in tegra_qspi_combined_seq_xfer()
1121 dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n", ret); in tegra_qspi_combined_seq_xfer()
1123 if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX)) in tegra_qspi_combined_seq_xfer()
1126 dmaengine_terminate_all(tqspi->tx_dma_chan); in tegra_qspi_combined_seq_xfer()
1128 if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX)) in tegra_qspi_combined_seq_xfer()
1131 dmaengine_terminate_all(tqspi->rx_dma_chan); in tegra_qspi_combined_seq_xfer()
1134 if (!tqspi->is_curr_dma_xfer) { in tegra_qspi_combined_seq_xfer()
1152 if (device_reset(tqspi->dev) < 0) in tegra_qspi_combined_seq_xfer()
1153 dev_warn_once(tqspi->dev, "device reset failed\n"); in tegra_qspi_combined_seq_xfer()
1155 ret = -EIO; in tegra_qspi_combined_seq_xfer()
1159 if (tqspi->tx_status || tqspi->rx_status) { in tegra_qspi_combined_seq_xfer()
1160 dev_err(tqspi->dev, "QSPI Transfer failed\n"); in tegra_qspi_combined_seq_xfer()
1161 tqspi->tx_status = 0; in tegra_qspi_combined_seq_xfer()
1162 tqspi->rx_status = 0; in tegra_qspi_combined_seq_xfer()
1163 ret = -EIO; in tegra_qspi_combined_seq_xfer()
1166 if (!xfer->cs_change) { in tegra_qspi_combined_seq_xfer()
1172 ret = -EINVAL; in tegra_qspi_combined_seq_xfer()
1175 msg->actual_length += xfer->len; in tegra_qspi_combined_seq_xfer()
1181 msg->status = ret; in tegra_qspi_combined_seq_xfer()
1193 struct spi_device *spi = msg->spi; in tegra_qspi_non_combined_seq_xfer()
1198 msg->status = 0; in tegra_qspi_non_combined_seq_xfer()
1199 msg->actual_length = 0; in tegra_qspi_non_combined_seq_xfer()
1200 tqspi->tx_status = 0; in tegra_qspi_non_combined_seq_xfer()
1201 tqspi->rx_status = 0; in tegra_qspi_non_combined_seq_xfer()
1206 if (tqspi->soc_data->supports_tpm) in tegra_qspi_non_combined_seq_xfer()
1209 list_for_each_entry(transfer, &msg->transfers, transfer_list) { in tegra_qspi_non_combined_seq_xfer()
1214 tqspi->dummy_cycles = 0; in tegra_qspi_non_combined_seq_xfer()
1216 * Tegra QSPI hardware supports dummy-byte transfers after the actual transfer in tegra_qspi_non_combined_seq_xfer()
1221 if (!list_is_last(&xfer->transfer_list, &msg->transfers)) { in tegra_qspi_non_combined_seq_xfer()
1225 if (next_xfer->dummy_data) { in tegra_qspi_non_combined_seq_xfer()
1226 u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits; in tegra_qspi_non_combined_seq_xfer()
1229 tqspi->dummy_cycles = dummy_cycles; in tegra_qspi_non_combined_seq_xfer()
1230 dummy_bytes = next_xfer->len; in tegra_qspi_non_combined_seq_xfer()
1236 reinit_completion(&tqspi->xfer_completion); in tegra_qspi_non_combined_seq_xfer()
1242 dev_err(tqspi->dev, "failed to start transfer: %d\n", ret); in tegra_qspi_non_combined_seq_xfer()
1246 ret = wait_for_completion_timeout(&tqspi->xfer_completion, QSPI_DMA_TIMEOUT); in tegra_qspi_non_combined_seq_xfer()
1249 dev_err(tqspi->dev, "transfer timeout\n"); in tegra_qspi_non_combined_seq_xfer()
1250 if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX)) in tegra_qspi_non_combined_seq_xfer()
1251 dmaengine_terminate_all(tqspi->tx_dma_chan); in tegra_qspi_non_combined_seq_xfer()
1252 if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX)) in tegra_qspi_non_combined_seq_xfer()
1253 dmaengine_terminate_all(tqspi->rx_dma_chan); in tegra_qspi_non_combined_seq_xfer()
1255 ret = -EIO; in tegra_qspi_non_combined_seq_xfer()
1259 if (tqspi->tx_status || tqspi->rx_status) { in tegra_qspi_non_combined_seq_xfer()
1261 ret = -EIO; in tegra_qspi_non_combined_seq_xfer()
1265 msg->actual_length += xfer->len + dummy_bytes; in tegra_qspi_non_combined_seq_xfer()
1274 if (list_is_last(&xfer->transfer_list, &msg->transfers)) { in tegra_qspi_non_combined_seq_xfer()
1275 /* de-activate CS after last transfer only when cs_change is not set */ in tegra_qspi_non_combined_seq_xfer()
1276 if (!xfer->cs_change) { in tegra_qspi_non_combined_seq_xfer()
1280 } else if (xfer->cs_change) { in tegra_qspi_non_combined_seq_xfer()
1281 /* de-activate CS between transfers only when cs_change is set */ in tegra_qspi_non_combined_seq_xfer()
1289 msg->status = ret; in tegra_qspi_non_combined_seq_xfer()
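The dummy-cycle conversion in the loop above: a dummy transfer of len bytes on an n-bit-wide bus takes len * 8 / n clock cycles, which is what later gets programmed through QSPI_NUM_DUMMY_CYCLE(). A sketch:

#include <stdio.h>

static unsigned int dummy_cycles(unsigned int len, unsigned int tx_nbits)
{
	return len * 8 / tx_nbits;
}

int main(void)
{
	printf("%u\n", dummy_cycles(1, 1));	/* 1 dummy byte on x1  -> 8 cycles */
	printf("%u\n", dummy_cycles(4, 4));	/* 4 dummy bytes on x4 -> 8 cycles */
	return 0;
}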
1300 list_for_each_entry(xfer, &msg->transfers, transfer_list) { in tegra_qspi_validate_cmb_seq()
1303 if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3) in tegra_qspi_validate_cmb_seq()
1305 xfer = list_first_entry(&msg->transfers, typeof(*xfer), transfer_list); in tegra_qspi_validate_cmb_seq()
1307 if (xfer->len > 2) in tegra_qspi_validate_cmb_seq()
1310 if (xfer->len > 4 || xfer->len < 3) in tegra_qspi_validate_cmb_seq()
1313 if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2)) in tegra_qspi_validate_cmb_seq()
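A sketch of the combined-sequence check above: exactly three transfers (command, address, data), a command of at most 2 bytes, an address of 3 or 4 bytes, and, without DMA, a data phase that fits the FIFO. The QSPI_FIFO_DEPTH value here is illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define QSPI_FIFO_DEPTH	64	/* illustrative */

static bool cmb_seq_ok(size_t n, const unsigned int len[],
		       bool cmb_capable, bool has_dma)
{
	if (!cmb_capable || n != 3)
		return false;
	if (len[0] > 2)			/* command phase */
		return false;
	if (len[1] < 3 || len[1] > 4)	/* address phase */
		return false;
	if (!has_dma && len[2] > (QSPI_FIFO_DEPTH << 2))	/* data phase */
		return false;
	return true;
}

int main(void)
{
	unsigned int len[] = { 1, 3, 256 };

	printf("%d\n", cmb_seq_ok(3, len, true, true));	/* prints 1 */
	return 0;
}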
1337 struct spi_transfer *t = tqspi->curr_xfer; in handle_cpu_based_xfer()
1340 spin_lock_irqsave(&tqspi->lock, flags); in handle_cpu_based_xfer()
1342 if (tqspi->tx_status || tqspi->rx_status) { in handle_cpu_based_xfer()
1344 complete(&tqspi->xfer_completion); in handle_cpu_based_xfer()
1348 if (tqspi->cur_direction & DATA_DIR_RX) in handle_cpu_based_xfer()
1351 if (tqspi->cur_direction & DATA_DIR_TX) in handle_cpu_based_xfer()
1352 tqspi->cur_pos = tqspi->cur_tx_pos; in handle_cpu_based_xfer()
1354 tqspi->cur_pos = tqspi->cur_rx_pos; in handle_cpu_based_xfer()
1356 if (tqspi->cur_pos == t->len) { in handle_cpu_based_xfer()
1357 complete(&tqspi->xfer_completion); in handle_cpu_based_xfer()
1364 spin_unlock_irqrestore(&tqspi->lock, flags); in handle_cpu_based_xfer()
1370 struct spi_transfer *t = tqspi->curr_xfer; in handle_dma_based_xfer()
1376 if (tqspi->cur_direction & DATA_DIR_TX) { in handle_dma_based_xfer()
1377 if (tqspi->tx_status) { in handle_dma_based_xfer()
1378 dmaengine_terminate_all(tqspi->tx_dma_chan); in handle_dma_based_xfer()
1381 wait_status = wait_for_completion_interruptible_timeout(&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT); in handle_dma_based_xfer()
1384 dmaengine_terminate_all(tqspi->tx_dma_chan); in handle_dma_based_xfer()
1385 dev_err(tqspi->dev, "failed TX DMA transfer\n"); in handle_dma_based_xfer()
1391 if (tqspi->cur_direction & DATA_DIR_RX) { in handle_dma_based_xfer()
1392 if (tqspi->rx_status) { in handle_dma_based_xfer()
1393 dmaengine_terminate_all(tqspi->rx_dma_chan); in handle_dma_based_xfer()
1396 wait_status = wait_for_completion_interruptible_timeout(&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT); in handle_dma_based_xfer()
1399 dmaengine_terminate_all(tqspi->rx_dma_chan); in handle_dma_based_xfer()
1400 dev_err(tqspi->dev, "failed RX DMA transfer\n"); in handle_dma_based_xfer()
1406 spin_lock_irqsave(&tqspi->lock, flags); in handle_dma_based_xfer()
1411 complete(&tqspi->xfer_completion); in handle_dma_based_xfer()
1415 if (tqspi->cur_direction & DATA_DIR_RX) in handle_dma_based_xfer()
1418 if (tqspi->cur_direction & DATA_DIR_TX) in handle_dma_based_xfer()
1419 tqspi->cur_pos = tqspi->cur_tx_pos; in handle_dma_based_xfer()
1421 tqspi->cur_pos = tqspi->cur_rx_pos; in handle_dma_based_xfer()
1423 if (tqspi->cur_pos == t->len) { in handle_dma_based_xfer()
1425 complete(&tqspi->xfer_completion); in handle_dma_based_xfer()
1439 spin_unlock_irqrestore(&tqspi->lock, flags); in handle_dma_based_xfer()
1447 tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS); in tegra_qspi_isr_thread()
1449 if (tqspi->cur_direction & DATA_DIR_TX) in tegra_qspi_isr_thread()
1450 tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF); in tegra_qspi_isr_thread()
1452 if (tqspi->cur_direction & DATA_DIR_RX) in tegra_qspi_isr_thread()
1453 tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF); in tegra_qspi_isr_thread()
1457 if (!tqspi->is_curr_dma_xfer) in tegra_qspi_isr_thread()
1493 .compatible = "nvidia,tegra210-qspi",
1496 .compatible = "nvidia,tegra186-qspi",
1499 .compatible = "nvidia,tegra194-qspi",
1502 .compatible = "nvidia,tegra234-qspi",
1505 .compatible = "nvidia,tegra241-qspi",
1542 host = devm_spi_alloc_host(&pdev->dev, sizeof(*tqspi)); in tegra_qspi_probe()
1544 return -ENOMEM; in tegra_qspi_probe()
1549 host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH | SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD; in tegra_qspi_probe()
1551 host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8); in tegra_qspi_probe()
1552 host->flags = SPI_CONTROLLER_HALF_DUPLEX; in tegra_qspi_probe()
1553 host->setup = tegra_qspi_setup; in tegra_qspi_probe()
1554 host->transfer_one_message = tegra_qspi_transfer_one_message; in tegra_qspi_probe()
1555 host->num_chipselect = 1; in tegra_qspi_probe()
1556 host->auto_runtime_pm = true; in tegra_qspi_probe()
1558 bus_num = of_alias_get_id(pdev->dev.of_node, "spi"); in tegra_qspi_probe()
1560 host->bus_num = bus_num; in tegra_qspi_probe()
1562 tqspi->host = host; in tegra_qspi_probe()
1563 tqspi->dev = &pdev->dev; in tegra_qspi_probe()
1564 spin_lock_init(&tqspi->lock); in tegra_qspi_probe()
1566 tqspi->soc_data = device_get_match_data(&pdev->dev); in tegra_qspi_probe()
1567 host->num_chipselect = tqspi->soc_data->cs_count; in tegra_qspi_probe()
1568 tqspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r); in tegra_qspi_probe()
1569 if (IS_ERR(tqspi->base)) in tegra_qspi_probe()
1570 return PTR_ERR(tqspi->base); in tegra_qspi_probe()
1572 tqspi->phys = r->start; in tegra_qspi_probe()
1576 tqspi->irq = qspi_irq; in tegra_qspi_probe()
1578 if (!has_acpi_companion(tqspi->dev)) { in tegra_qspi_probe()
1579 tqspi->clk = devm_clk_get(&pdev->dev, "qspi"); in tegra_qspi_probe()
1580 if (IS_ERR(tqspi->clk)) { in tegra_qspi_probe()
1581 ret = PTR_ERR(tqspi->clk); in tegra_qspi_probe()
1582 dev_err(&pdev->dev, "failed to get clock: %d\n", ret); in tegra_qspi_probe()
1588 tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2; in tegra_qspi_probe()
1589 tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN; in tegra_qspi_probe()
1595 if (tqspi->use_dma) in tegra_qspi_probe()
1596 tqspi->max_buf_size = tqspi->dma_buf_size; in tegra_qspi_probe()
1598 init_completion(&tqspi->tx_dma_complete); in tegra_qspi_probe()
1599 init_completion(&tqspi->rx_dma_complete); in tegra_qspi_probe()
1600 init_completion(&tqspi->xfer_completion); in tegra_qspi_probe()
1602 pm_runtime_enable(&pdev->dev); in tegra_qspi_probe()
1603 ret = pm_runtime_resume_and_get(&pdev->dev); in tegra_qspi_probe()
1605 dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret); in tegra_qspi_probe()
1609 if (device_reset(tqspi->dev) < 0) in tegra_qspi_probe()
1610 dev_warn_once(tqspi->dev, "device reset failed\n"); in tegra_qspi_probe()
1612 tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL; in tegra_qspi_probe()
1613 tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1); in tegra_qspi_probe()
1614 tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1); in tegra_qspi_probe()
1615 tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2); in tegra_qspi_probe()
1616 tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2); in tegra_qspi_probe()
1618 pm_runtime_put(&pdev->dev); in tegra_qspi_probe()
1620 ret = request_threaded_irq(tqspi->irq, NULL, tegra_qspi_isr_thread, IRQF_ONESHOT, dev_name(&pdev->dev), tqspi); in tegra_qspi_probe()
1624 dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret); in tegra_qspi_probe()
1628 host->dev.of_node = pdev->dev.of_node; in tegra_qspi_probe()
1631 dev_err(&pdev->dev, "failed to register host: %d\n", ret); in tegra_qspi_probe()
1640 pm_runtime_force_suspend(&pdev->dev); in tegra_qspi_probe()
1651 free_irq(tqspi->irq, tqspi); in tegra_qspi_remove()
1652 pm_runtime_force_suspend(&pdev->dev); in tegra_qspi_remove()
1675 tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1); in tegra_qspi_resume()
1676 tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2); in tegra_qspi_resume()
1688 if (has_acpi_companion(tqspi->dev)) in tegra_qspi_runtime_suspend()
1693 clk_disable_unprepare(tqspi->clk); in tegra_qspi_runtime_suspend()
1705 if (has_acpi_companion(tqspi->dev)) in tegra_qspi_runtime_resume()
1707 ret = clk_prepare_enable(tqspi->clk); in tegra_qspi_runtime_resume()
1709 dev_err(tqspi->dev, "failed to enable clock: %d\n", ret); in tegra_qspi_runtime_resume()
1721 .name = "tegra-qspi",
1731 MODULE_ALIAS("platform:qspi-tegra");
1732 MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");