// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 NVIDIA CORPORATION.

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
#include <linux/property.h>
#include <linux/sizes.h>

#define QSPI_COMMAND1			0x000
#define QSPI_BIT_LENGTH(x)		(((x) & 0x1f) << 0)
#define QSPI_PACKED			BIT(5)
#define QSPI_INTERFACE_WIDTH_MASK	(0x03 << 7)
#define QSPI_INTERFACE_WIDTH(x)		(((x) & 0x03) << 7)
#define QSPI_INTERFACE_WIDTH_SINGLE	QSPI_INTERFACE_WIDTH(0)
#define QSPI_INTERFACE_WIDTH_DUAL	QSPI_INTERFACE_WIDTH(1)
#define QSPI_INTERFACE_WIDTH_QUAD	QSPI_INTERFACE_WIDTH(2)
#define QSPI_SDR_DDR_SEL		BIT(9)
#define QSPI_TX_EN			BIT(11)
#define QSPI_RX_EN			BIT(12)
#define QSPI_CS_SW_VAL			BIT(20)
#define QSPI_CS_SW_HW			BIT(21)

#define QSPI_CS_POL_INACTIVE(n)		(1 << (22 + (n)))
#define QSPI_CS_POL_INACTIVE_MASK	(0xF << 22)
#define QSPI_CS_SEL_0			(0 << 26)
#define QSPI_CS_SEL_1			(1 << 26)
#define QSPI_CS_SEL_2			(2 << 26)
#define QSPI_CS_SEL_3			(3 << 26)
#define QSPI_CS_SEL_MASK		(3 << 26)
#define QSPI_CS_SEL(x)			(((x) & 0x3) << 26)

#define QSPI_CONTROL_MODE_0		(0 << 28)
#define QSPI_CONTROL_MODE_3		(3 << 28)
#define QSPI_CONTROL_MODE_MASK		(3 << 28)
#define QSPI_M_S			BIT(30)
#define QSPI_PIO			BIT(31)

#define QSPI_COMMAND2			0x004
#define QSPI_TX_TAP_DELAY(x)		(((x) & 0x3f) << 10)
#define QSPI_RX_TAP_DELAY(x)		(((x) & 0xff) << 0)

#define QSPI_CS_TIMING1			0x008
#define QSPI_SETUP_HOLD(setup, hold)	(((setup) << 4) | (hold))

#define QSPI_CS_TIMING2			0x00c
#define CYCLES_BETWEEN_PACKETS_0(x)	(((x) & 0x1f) << 0)
#define CS_ACTIVE_BETWEEN_PACKETS_0	BIT(5)

#define QSPI_TRANS_STATUS		0x010
#define QSPI_BLK_CNT(val)		(((val) >> 0) & 0xffff)
#define QSPI_RDY			BIT(30)

#define QSPI_FIFO_STATUS		0x014
#define QSPI_RX_FIFO_EMPTY		BIT(0)
#define QSPI_RX_FIFO_FULL		BIT(1)
#define QSPI_TX_FIFO_EMPTY		BIT(2)
#define QSPI_TX_FIFO_FULL		BIT(3)
#define QSPI_RX_FIFO_UNF		BIT(4)
#define QSPI_RX_FIFO_OVF		BIT(5)
#define QSPI_TX_FIFO_UNF		BIT(6)
#define QSPI_TX_FIFO_OVF		BIT(7)
#define QSPI_ERR			BIT(8)
#define QSPI_TX_FIFO_FLUSH		BIT(14)
#define QSPI_RX_FIFO_FLUSH		BIT(15)
#define QSPI_TX_FIFO_EMPTY_COUNT(val)	(((val) >> 16) & 0x7f)
#define QSPI_RX_FIFO_FULL_COUNT(val)	(((val) >> 23) & 0x7f)

#define QSPI_FIFO_ERROR			(QSPI_RX_FIFO_UNF | \
					 QSPI_RX_FIFO_OVF | \
					 QSPI_TX_FIFO_UNF | \
					 QSPI_TX_FIFO_OVF)
#define QSPI_FIFO_EMPTY			(QSPI_RX_FIFO_EMPTY | \
					 QSPI_TX_FIFO_EMPTY)

#define QSPI_TX_DATA			0x018
#define QSPI_RX_DATA			0x01c

#define QSPI_DMA_CTL			0x020
#define QSPI_TX_TRIG(n)			(((n) & 0x3) << 15)
#define QSPI_TX_TRIG_1			QSPI_TX_TRIG(0)
#define QSPI_TX_TRIG_4			QSPI_TX_TRIG(1)
#define QSPI_TX_TRIG_8			QSPI_TX_TRIG(2)
#define QSPI_TX_TRIG_16			QSPI_TX_TRIG(3)

#define QSPI_RX_TRIG(n)			(((n) & 0x3) << 19)
#define QSPI_RX_TRIG_1			QSPI_RX_TRIG(0)
#define QSPI_RX_TRIG_4			QSPI_RX_TRIG(1)
#define QSPI_RX_TRIG_8			QSPI_RX_TRIG(2)
#define QSPI_RX_TRIG_16			QSPI_RX_TRIG(3)
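
/*
 * The TX/RX trigger fields select the FIFO fill level (1, 4, 8 or 16 words)
 * at which the controller raises a DMA request.
 * tegra_qspi_start_dma_based_transfer() picks the level from the transfer
 * length: lengths that are not a multiple of 16 bytes use the 1-word trigger,
 * odd multiples of 16 use the 4-word trigger, and multiples of 32 use the
 * 8-word trigger.
 */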

#define QSPI_DMA_EN			BIT(31)

#define QSPI_DMA_BLK			0x024
#define QSPI_DMA_BLK_SET(x)		(((x) & 0xffff) << 0)

#define QSPI_DMA_MEM_ADDRESS		0x028
#define QSPI_DMA_HI_ADDRESS		0x02c

#define QSPI_TX_FIFO			0x108
#define QSPI_RX_FIFO			0x188

#define QSPI_FIFO_DEPTH			64

#define QSPI_INTR_MASK			0x18c
#define QSPI_INTR_RX_FIFO_UNF_MASK	BIT(25)
#define QSPI_INTR_RX_FIFO_OVF_MASK	BIT(26)
#define QSPI_INTR_TX_FIFO_UNF_MASK	BIT(27)
#define QSPI_INTR_TX_FIFO_OVF_MASK	BIT(28)
#define QSPI_INTR_RDY_MASK		BIT(29)
#define QSPI_INTR_RX_TX_FIFO_ERR	(QSPI_INTR_RX_FIFO_UNF_MASK | \
					 QSPI_INTR_RX_FIFO_OVF_MASK | \
					 QSPI_INTR_TX_FIFO_UNF_MASK | \
					 QSPI_INTR_TX_FIFO_OVF_MASK)

#define QSPI_MISC_REG			0x194
#define QSPI_NUM_DUMMY_CYCLE(x)		(((x) & 0xff) << 0)
#define QSPI_DUMMY_CYCLES_MAX		0xff

#define QSPI_CMB_SEQ_CMD		0x19c
#define QSPI_COMMAND_VALUE_SET(x)	(((x) & 0xFF) << 0)

#define QSPI_CMB_SEQ_CMD_CFG		0x1a0
#define QSPI_COMMAND_X1_X2_X4(x)	((((x) >> 1) & 0x3) << 13)
#define QSPI_COMMAND_X1_X2_X4_MASK	(0x03 << 13)
#define QSPI_COMMAND_SDR_DDR		BIT(12)
#define QSPI_COMMAND_SIZE_SET(x)	(((x) & 0xFF) << 0)

#define QSPI_GLOBAL_CONFIG		0x1a4
#define QSPI_CMB_SEQ_EN			BIT(0)
#define QSPI_TPM_WAIT_POLL_EN		BIT(1)

#define QSPI_CMB_SEQ_ADDR		0x1a8
#define QSPI_ADDRESS_VALUE_SET(x)	(((x) & 0xFFFF) << 0)

#define QSPI_CMB_SEQ_ADDR_CFG		0x1ac
#define QSPI_ADDRESS_X1_X2_X4(x)	((((x) >> 1) & 0x3) << 13)
#define QSPI_ADDRESS_X1_X2_X4_MASK	(0x03 << 13)
#define QSPI_ADDRESS_SDR_DDR		BIT(12)
#define QSPI_ADDRESS_SIZE_SET(x)	(((x) & 0xFF) << 0)

#define DATA_DIR_TX			BIT(0)
#define DATA_DIR_RX			BIT(1)

#define QSPI_DMA_TIMEOUT		(msecs_to_jiffies(1000))
#define DEFAULT_QSPI_DMA_BUF_LEN	SZ_64K

enum tegra_qspi_transfer_type {
	CMD_TRANSFER = 0,
	ADDR_TRANSFER = 1,
	DUMMY_TRANSFER = 2,
	DATA_TRANSFER = 3
};

struct tegra_qspi_soc_data {
	bool cmb_xfer_capable;
	bool supports_tpm;
	bool has_ext_dma;
	unsigned int cs_count;
};

struct tegra_qspi_client_data {
	int tx_clk_tap_delay;
	int rx_clk_tap_delay;
};

struct tegra_qspi {
	struct device *dev;
	struct spi_controller *host;
	/* lock to protect data accessed by irq */
	spinlock_t lock;

	struct clk *clk;
	void __iomem *base;
	phys_addr_t phys;
	unsigned int irq;

	u32 cur_speed;
	unsigned int cur_pos;
	unsigned int words_per_32bit;
	unsigned int bytes_per_word;
	unsigned int curr_dma_words;
	unsigned int cur_direction;

	unsigned int cur_rx_pos;
	unsigned int cur_tx_pos;

	unsigned int dma_buf_size;
	unsigned int max_buf_size;
	bool is_curr_dma_xfer;

	struct completion rx_dma_complete;
	struct completion tx_dma_complete;

	u32 tx_status;
	u32 rx_status;
	u32 status_reg;
	bool is_packed;
	bool use_dma;

	u32 command1_reg;
	u32 dma_control_reg;
	u32 def_command1_reg;
	u32 def_command2_reg;
	u32 spi_cs_timing1;
	u32 spi_cs_timing2;
	u8 dummy_cycles;

	struct completion xfer_completion;
	struct spi_transfer *curr_xfer;

	struct dma_chan *rx_dma_chan;
	u32 *rx_dma_buf;
	dma_addr_t rx_dma_phys;
	struct dma_async_tx_descriptor *rx_dma_desc;

	struct dma_chan *tx_dma_chan;
	u32 *tx_dma_buf;
	dma_addr_t tx_dma_phys;
	struct dma_async_tx_descriptor *tx_dma_desc;
	const struct tegra_qspi_soc_data *soc_data;
};
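
/*
 * Register writes are posted (they can sit in the PPSB write queue for a
 * while); tegra_qspi_writel() therefore reads QSPI_COMMAND1 back after every
 * write except TX FIFO pushes, so that the write has landed before the
 * caller proceeds.
 */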

static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
{
	return readl(tqspi->base + offset);
}

static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
{
	writel(value, tqspi->base + offset);

	/* read back register to make sure that register writes completed */
	if (offset != QSPI_TX_FIFO)
		readl(tqspi->base + QSPI_COMMAND1);
}

static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
{
	u32 value;

	/* write 1 to clear status register */
	value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
	tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);

	value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	if (!(value & QSPI_INTR_RDY_MASK)) {
		value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
		tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
	}

	/* clear fifo status error if any */
	value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if (value & QSPI_ERR)
		tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
}

static unsigned int
tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int max_word, max_len, total_fifo_words;
	unsigned int remain_len = t->len - tqspi->cur_pos;
	unsigned int bits_per_word = t->bits_per_word;

	tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);

	/*
	 * Tegra QSPI controller supports packed or unpacked mode transfers.
	 * Packed mode is used for data transfers using 8, 16, or 32 bits per
	 * word with a minimum transfer of 1 word and for all other transfers
	 * unpacked mode will be used.
	 */
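
	/*
	 * For example, a 100-byte transfer at 8 bits per word runs packed
	 * (four packets per FIFO word): curr_dma_words = 100 and
	 * total_fifo_words = 25. A 10-byte transfer at 24 bits per word runs
	 * unpacked (one packet per FIFO word): bytes_per_word = 3,
	 * curr_dma_words = 4 and total_fifo_words = 4.
	 */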

	if ((bits_per_word == 8 || bits_per_word == 16 ||
	     bits_per_word == 32) && t->len > 3) {
		tqspi->is_packed = true;
		tqspi->words_per_32bit = 32 / bits_per_word;
	} else {
		tqspi->is_packed = false;
		tqspi->words_per_32bit = 1;
	}

	if (tqspi->is_packed) {
		max_len = min(remain_len, tqspi->max_buf_size);
		tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
		total_fifo_words = (max_len + 3) / 4;
	} else {
		max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
		max_word = min(max_word, tqspi->max_buf_size / 4);
		tqspi->curr_dma_words = max_word;
		total_fifo_words = max_word;
	}

	return total_fifo_words;
}

static unsigned int
tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int written_words, fifo_words_left, count;
	unsigned int len, tx_empty_count, max_n_32bit, i;
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u32 fifo_status;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tqspi->is_packed) {
		fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
		written_words = min(fifo_words_left, tqspi->curr_dma_words);
		len = written_words * tqspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(len, 4);
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; (i < 4) && len; i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
	} else {
		unsigned int write_bytes;
		u8 bytes_per_word = tqspi->bytes_per_word;

		max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
		written_words = max_n_32bit;
		len = written_words * tqspi->bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		write_bytes = len;
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; len && (i < min(4, bytes_per_word)); i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += write_bytes;
	}

	return written_words;
}

static unsigned int
tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len, rx_full_count, count, i;
	unsigned int read_words = 0;
	u32 fifo_status, x;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
	if (tqspi->is_packed) {
		len = tqspi->curr_dma_words * tqspi->bytes_per_word;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);

			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> i * 8) & 0xff;
		}

		read_words += tqspi->curr_dma_words;
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		u8 bytes_per_word = tqspi->bytes_per_word;
		unsigned int read_bytes;

		len = rx_full_count * bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		read_bytes = len;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		read_words += rx_full_count;
		tqspi->cur_rx_pos += read_bytes;
	}

	return read_words;
}
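
/*
 * The two helpers above push/pull the hardware FIFOs directly and serve the
 * PIO path; the copy helpers below stage data in the driver's coherent
 * bounce buffers, which are what unpacked-mode DMA transfers actually move.
 */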

static void
tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	/*
	 * In packed mode, each word in FIFO may contain multiple packets
	 * based on bits per word. So all bytes in each FIFO word are valid.
	 *
	 * In unpacked mode, each word in FIFO contains single packet and
	 * based on bits per word any remaining bits in FIFO word will be
	 * ignored by the hardware and are invalid bits.
	 */
	if (tqspi->is_packed) {
		tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
		unsigned int i, count, consume, write_bytes;

		/*
		 * Fill tx_dma_buf to contain single packet in each word based
		 * on bits per word from SPI core tx_buf.
		 */
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		write_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = 0;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tqspi->tx_dma_buf[count] = x;
		}

		tqspi->cur_tx_pos += write_bytes;
	}
}

static void
tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	if (tqspi->is_packed) {
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		unsigned int i, count, consume, read_bytes;

		/*
		 * Each FIFO word contains single data packet.
		 * Skip invalid bits in each FIFO word based on bits per word
		 * and align bytes while filling in SPI core rx_buf.
		 */
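		/*
		 * For example, with 24 bits per word rx_mask is 0xffffff and
		 * only the low three bytes of each FIFO word are copied out.
		 */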
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		read_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = tqspi->rx_dma_buf[count] & rx_mask;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		tqspi->cur_rx_pos += read_bytes;
	}
}

static void tegra_qspi_dma_complete(void *args)
{
	struct completion *dma_complete = args;

	complete(dma_complete);
}

static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t tx_dma_phys;

	reinit_completion(&tqspi->tx_dma_complete);

	if (tqspi->is_packed)
		tx_dma_phys = t->tx_dma;
	else
		tx_dma_phys = tqspi->tx_dma_phys;

	tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
							 len, DMA_MEM_TO_DEV,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->tx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get TX descriptor\n");
		return -EIO;
	}

	tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
	dmaengine_submit(tqspi->tx_dma_desc);
	dma_async_issue_pending(tqspi->tx_dma_chan);

	return 0;
}

static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t rx_dma_phys;

	reinit_completion(&tqspi->rx_dma_complete);

	if (tqspi->is_packed)
		rx_dma_phys = t->rx_dma;
	else
		rx_dma_phys = tqspi->rx_dma_phys;

	tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
							 len, DMA_DEV_TO_MEM,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->rx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get RX descriptor\n");
		return -EIO;
	}

	tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
	dmaengine_submit(tqspi->rx_dma_desc);
	dma_async_issue_pending(tqspi->rx_dma_chan);

	return 0;
}

static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
{
	void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
	u32 val;

	val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
		return 0;

	val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
	tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);

	if (!atomic)
		return readl_relaxed_poll_timeout(addr, val,
						  (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						  1000, 1000000);

	return readl_relaxed_poll_timeout_atomic(addr, val,
						 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						 1000, 1000000);
}

static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
{
	u32 intr_mask;

	intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
	tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
}

static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	if (t->tx_buf) {
		t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->tx_dma))
			return -ENOMEM;
	}

	if (t->rx_buf) {
		t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
			dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	if (t->tx_buf)
		dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
	if (t->rx_buf)
		dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}

static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	struct dma_slave_config dma_sconfig = { 0 };
	dma_addr_t rx_dma_phys, tx_dma_phys;
	unsigned int len;
	u8 dma_burst;
	int ret = 0;
	u32 val;

	if (tqspi->is_packed) {
		ret = tegra_qspi_dma_map_xfer(tqspi, t);
		if (ret < 0)
			return ret;
	}

	val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
	tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(tqspi);

	if (tqspi->is_packed)
		len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
	else
		len = tqspi->curr_dma_words * 4;

	/* set attention level based on length of transfer */
	if (tqspi->soc_data->has_ext_dma) {
		val = 0;
		if (len & 0xf) {
			val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
			dma_burst = 1;
		} else if (((len) >> 4) & 0x1) {
			val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
			dma_burst = 4;
		} else {
			val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
			dma_burst = 8;
		}

		tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
	}

	tqspi->dma_control_reg = val;

	dma_sconfig.device_fc = true;

	if (tqspi->cur_direction & DATA_DIR_TX) {
		if (tqspi->tx_dma_chan) {
			dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
			dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			dma_sconfig.dst_maxburst = dma_burst;
			ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
			if (ret < 0) {
				dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
				return ret;
			}

			tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
			ret = tegra_qspi_start_tx_dma(tqspi, t, len);
			if (ret < 0) {
				dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
				return ret;
			}
		} else {
			if (tqspi->is_packed)
				tx_dma_phys = t->tx_dma;
			else
				tx_dma_phys = tqspi->tx_dma_phys;
			tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
			tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys),
					  QSPI_DMA_MEM_ADDRESS);
			tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff),
					  QSPI_DMA_HI_ADDRESS);
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		if (tqspi->rx_dma_chan) {
			dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
			dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			dma_sconfig.src_maxburst = dma_burst;
			ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
			if (ret < 0) {
				dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
				return ret;
			}

			ret = tegra_qspi_start_rx_dma(tqspi, t, len);
			if (ret < 0) {
				dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
				if (tqspi->cur_direction & DATA_DIR_TX)
					dmaengine_terminate_all(tqspi->tx_dma_chan);
				return ret;
			}
		} else {
			if (tqspi->is_packed)
				rx_dma_phys = t->rx_dma;
			else
				rx_dma_phys = tqspi->rx_dma_phys;

			tegra_qspi_writel(tqspi, lower_32_bits(rx_dma_phys),
					  QSPI_DMA_MEM_ADDRESS);
			tegra_qspi_writel(tqspi, (upper_32_bits(rx_dma_phys) & 0xff),
					  QSPI_DMA_HI_ADDRESS);
		}
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);

	tqspi->is_curr_dma_xfer = true;
	tqspi->dma_control_reg = val;
	val |= QSPI_DMA_EN;
	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);

	return ret;
}
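
/*
 * PIO path: pre-fill the TX FIFO from the client buffer (when transmitting),
 * program the block count and kick the transfer with QSPI_PIO. Any remaining
 * words are pushed/popped from the interrupt handler.
 */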
static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
{
	u32 val;
	unsigned int cur_words;

	if (qspi->cur_direction & DATA_DIR_TX)
		cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
	else
		cur_words = qspi->curr_dma_words;

	val = QSPI_DMA_BLK_SET(cur_words - 1);
	tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(qspi);

	qspi->is_curr_dma_xfer = false;
	val = qspi->command1_reg;
	val |= QSPI_PIO;
	tegra_qspi_writel(qspi, val, QSPI_COMMAND1);

	return 0;
}

static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
	if (tqspi->tx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
		tqspi->tx_dma_buf = NULL;
	}

	if (tqspi->tx_dma_chan) {
		dma_release_channel(tqspi->tx_dma_chan);
		tqspi->tx_dma_chan = NULL;
	}

	if (tqspi->rx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->rx_dma_buf, tqspi->rx_dma_phys);
		tqspi->rx_dma_buf = NULL;
	}

	if (tqspi->rx_dma_chan) {
		dma_release_channel(tqspi->rx_dma_chan);
		tqspi->rx_dma_chan = NULL;
	}
}

static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
{
	struct dma_chan *dma_chan;
	dma_addr_t dma_phys;
	u32 *dma_buf;
	int err;

	if (tqspi->soc_data->has_ext_dma) {
		dma_chan = dma_request_chan(tqspi->dev, "rx");
		if (IS_ERR(dma_chan)) {
			err = PTR_ERR(dma_chan);
			goto err_out;
		}

		tqspi->rx_dma_chan = dma_chan;

		dma_chan = dma_request_chan(tqspi->dev, "tx");
		if (IS_ERR(dma_chan)) {
			err = PTR_ERR(dma_chan);
			goto err_out;
		}

		tqspi->tx_dma_chan = dma_chan;
	} else {
		if (!device_iommu_mapped(tqspi->dev)) {
			dev_warn(tqspi->dev,
				 "IOMMU not enabled in device-tree, falling back to PIO mode\n");
			return 0;
		}
	}

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->rx_dma_buf = dma_buf;
	tqspi->rx_dma_phys = dma_phys;

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->tx_dma_buf = dma_buf;
	tqspi->tx_dma_phys = dma_phys;
	tqspi->use_dma = true;

	return 0;

err_out:
	tegra_qspi_deinit_dma(tqspi);

	if (err != -EPROBE_DEFER) {
		dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
		dev_err(tqspi->dev, "falling back to PIO\n");
		return 0;
	}

	return err;
}
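
/*
 * Build the COMMAND1 value for one transfer. On the first transfer of a
 * message this also programs the bus clock rate, chip select, SPI mode and
 * the per-device TX/RX tap delays taken from controller_data.
 */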
static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
					 bool is_first_of_msg)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	u32 command1, command2, speed = t->speed_hz;
	u8 bits_per_word = t->bits_per_word;
	u32 tx_tap = 0, rx_tap = 0;
	int req_mode;

	if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
		clk_set_rate(tqspi->clk, speed);
		tqspi->cur_speed = speed;
	}

	tqspi->cur_pos = 0;
	tqspi->cur_rx_pos = 0;
	tqspi->cur_tx_pos = 0;
	tqspi->curr_xfer = t;

	if (is_first_of_msg) {
		tegra_qspi_mask_clear_irq(tqspi);

		command1 = tqspi->def_command1_reg;
		command1 |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);

		command1 &= ~QSPI_CONTROL_MODE_MASK;
		req_mode = spi->mode & 0x3;
		if (req_mode == SPI_MODE_3)
			command1 |= QSPI_CONTROL_MODE_3;
		else
			command1 |= QSPI_CONTROL_MODE_0;

		if (spi->mode & SPI_CS_HIGH)
			command1 |= QSPI_CS_SW_VAL;
		else
			command1 &= ~QSPI_CS_SW_VAL;
		tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

		if (cdata && cdata->tx_clk_tap_delay)
			tx_tap = cdata->tx_clk_tap_delay;

		if (cdata && cdata->rx_clk_tap_delay)
			rx_tap = cdata->rx_clk_tap_delay;

		command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
		if (command2 != tqspi->def_command2_reg)
			tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);

	} else {
		command1 = tqspi->command1_reg;
		command1 &= ~QSPI_BIT_LENGTH(~0);
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
	}

	command1 &= ~QSPI_SDR_DDR_SEL;

	return command1;
}

static int tegra_qspi_start_transfer_one(struct spi_device *spi,
					 struct spi_transfer *t, u32 command1)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	unsigned int total_fifo_words;
	u8 bus_width = 0;
	int ret;

	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);

	command1 &= ~QSPI_PACKED;
	if (tqspi->is_packed)
		command1 |= QSPI_PACKED;
	tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

	tqspi->cur_direction = 0;

	command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
	if (t->rx_buf) {
		command1 |= QSPI_RX_EN;
		tqspi->cur_direction |= DATA_DIR_RX;
		bus_width = t->rx_nbits;
	}

	if (t->tx_buf) {
		command1 |= QSPI_TX_EN;
		tqspi->cur_direction |= DATA_DIR_TX;
		bus_width = t->tx_nbits;
	}

	command1 &= ~QSPI_INTERFACE_WIDTH_MASK;

	if (bus_width == SPI_NBITS_QUAD)
		command1 |= QSPI_INTERFACE_WIDTH_QUAD;
	else if (bus_width == SPI_NBITS_DUAL)
		command1 |= QSPI_INTERFACE_WIDTH_DUAL;
	else
		command1 |= QSPI_INTERFACE_WIDTH_SINGLE;

	tqspi->command1_reg = command1;

	tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);

	ret = tegra_qspi_flush_fifos(tqspi, false);
	if (ret < 0)
		return ret;

	if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
		ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);

	return ret;
}

static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
	struct tegra_qspi_client_data *cdata;
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);

	cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
	if (!cdata)
		return NULL;

	device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
				 &cdata->tx_clk_tap_delay);
	device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
				 &cdata->rx_clk_tap_delay);

	return cdata;
}
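
/*
 * Called by the SPI core when a device is set up: latch this device's
 * chip-select polarity into the default COMMAND1 value so that CS idles in
 * its inactive state.
 */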
static int tegra_qspi_setup(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	unsigned long flags;
	u32 val;
	int ret;

	ret = pm_runtime_resume_and_get(tqspi->dev);
	if (ret < 0) {
		dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	if (!cdata) {
		cdata = tegra_qspi_parse_cdata_dt(spi);
		spi->controller_data = cdata;
	}
	spin_lock_irqsave(&tqspi->lock, flags);

	/* keep default cs state to inactive */
	val = tqspi->def_command1_reg;
	val |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
	if (spi->mode & SPI_CS_HIGH)
		val &= ~QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
	else
		val |= QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));

	tqspi->def_command1_reg = val;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);

	spin_unlock_irqrestore(&tqspi->lock, flags);

	pm_runtime_put(tqspi->dev);

	return 0;
}

static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
{
	dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
	dev_dbg(tqspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_COMMAND1),
		tegra_qspi_readl(tqspi, QSPI_COMMAND2));
	dev_dbg(tqspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
		tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
	dev_dbg(tqspi->dev, "INTR_MASK: 0x%08x | MISC: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
		tegra_qspi_readl(tqspi, QSPI_MISC_REG));
	dev_dbg(tqspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
		tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
}

static void tegra_qspi_reset(struct tegra_qspi *tqspi)
{
	if (device_reset(tqspi->dev) < 0) {
		dev_warn_once(tqspi->dev, "device reset failed\n");
		tegra_qspi_mask_clear_irq(tqspi);
	}
}

static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
{
	dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
	tegra_qspi_dump_regs(tqspi);
	tegra_qspi_flush_fifos(tqspi, true);
	tegra_qspi_reset(tqspi);
}

static void tegra_qspi_transfer_end(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;

	if (cs_val)
		tqspi->command1_reg |= QSPI_CS_SW_VAL;
	else
		tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
}

static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi);
static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi);

/**
 * tegra_qspi_handle_timeout - Handle transfer timeout with hardware check
 * @tqspi: QSPI controller instance
 *
 * When a timeout occurs but hardware has completed the transfer (interrupt
 * was lost or delayed), manually trigger transfer completion processing.
 * This avoids failing transfers that actually succeeded.
 *
 * Returns: 0 if the transfer completed, -ETIMEDOUT if the hardware really
 * did time out, or -EIO if completion processing fails.
 */
static int tegra_qspi_handle_timeout(struct tegra_qspi *tqspi)
{
	irqreturn_t ret;
	u32 status;

	/* Check if hardware actually completed the transfer */
	status = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
	if (!(status & QSPI_RDY))
		return -ETIMEDOUT;

	/*
	 * Hardware completed but interrupt was lost/delayed. Manually
	 * process the completion by calling the appropriate handler.
	 */
	dev_warn_ratelimited(tqspi->dev,
			     "QSPI interrupt timeout, but transfer complete\n");

	/* Clear the transfer status */
	status = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
	tegra_qspi_writel(tqspi, status, QSPI_TRANS_STATUS);

	/* Manually trigger completion handler */
	if (!tqspi->is_curr_dma_xfer)
		ret = handle_cpu_based_xfer(tqspi);
	else
		ret = handle_dma_based_xfer(tqspi);

	return (ret == IRQ_HANDLED) ? 0 : -EIO;
}

static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
{
	u32 cmd_config = 0;

	/* Extract Command configuration and value */
	if (is_ddr)
		cmd_config |= QSPI_COMMAND_SDR_DDR;
	else
		cmd_config &= ~QSPI_COMMAND_SDR_DDR;

	cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
	cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);

	return cmd_config;
}

static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
{
	u32 addr_config = 0;

	if (is_ddr)
		addr_config |= QSPI_ADDRESS_SDR_DDR;
	else
		addr_config &= ~QSPI_ADDRESS_SDR_DDR;

	addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
	addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);

	return addr_config;
}

static void tegra_qspi_dma_stop(struct tegra_qspi *tqspi)
{
	u32 value;

	if ((tqspi->cur_direction & DATA_DIR_TX) && tqspi->tx_dma_chan)
		dmaengine_terminate_all(tqspi->tx_dma_chan);

	if ((tqspi->cur_direction & DATA_DIR_RX) && tqspi->rx_dma_chan)
		dmaengine_terminate_all(tqspi->rx_dma_chan);

	value = tegra_qspi_readl(tqspi, QSPI_DMA_CTL);
	value &= ~QSPI_DMA_EN;
	tegra_qspi_writel(tqspi, value, QSPI_DMA_CTL);
}

static void tegra_qspi_pio_stop(struct tegra_qspi *tqspi)
{
	u32 value;

	value = tegra_qspi_readl(tqspi, QSPI_COMMAND1);
	value &= ~QSPI_PIO;
	tegra_qspi_writel(tqspi, value, QSPI_COMMAND1);
}
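
/*
 * Combined sequence mode expects the message to be laid out as checked by
 * tegra_qspi_validate_cmb_seq(): a command transfer of at most two bytes, an
 * address transfer of three or four bytes, an optional dummy transfer
 * (dummy_data set) and finally the data transfer. Command and address are
 * latched from the first two phases and written to the CMB_SEQ registers
 * once the data phase is reached.
 */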
static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
					struct spi_message *msg)
{
	bool is_first_msg = true;
	struct spi_transfer *xfer;
	struct spi_device *spi = msg->spi;
	u8 transfer_phase = 0;
	u32 cmd1 = 0;
	int ret = 0;
	u32 address_value = 0;
	u32 cmd_config = 0, addr_config = 0;
	u8 cmd_value = 0, val = 0;

	/* Enable Combined sequence mode */
	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
	if (spi->mode & SPI_TPM_HW_FLOW) {
		if (tqspi->soc_data->supports_tpm)
			val |= QSPI_TPM_WAIT_POLL_EN;
		else
			return -EIO;
	}
	val |= QSPI_CMB_SEQ_EN;
	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
	/* Process individual transfer list */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		switch (transfer_phase) {
		case CMD_TRANSFER:
			/* X1 SDR mode */
			cmd_config = tegra_qspi_cmd_config(false, xfer->tx_nbits,
							   xfer->len);
			cmd_value = *((const u8 *)(xfer->tx_buf));
			break;
		case ADDR_TRANSFER:
			/* X1 SDR mode */
			addr_config = tegra_qspi_addr_config(false, xfer->tx_nbits,
							     xfer->len);
			address_value = *((const u32 *)(xfer->tx_buf));
			break;
		case DUMMY_TRANSFER:
			if (xfer->dummy_data) {
				tqspi->dummy_cycles = xfer->len * 8 / xfer->tx_nbits;
				break;
			}
			transfer_phase++;
			fallthrough;
		case DATA_TRANSFER:
			/* Program Command, Address value in register */
			tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
			tegra_qspi_writel(tqspi, address_value,
					  QSPI_CMB_SEQ_ADDR);
			/* Program Command and Address config in register */
			tegra_qspi_writel(tqspi, cmd_config,
					  QSPI_CMB_SEQ_CMD_CFG);
			tegra_qspi_writel(tqspi, addr_config,
					  QSPI_CMB_SEQ_ADDR_CFG);

			reinit_completion(&tqspi->xfer_completion);
			cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
							     is_first_msg);
			ret = tegra_qspi_start_transfer_one(spi, xfer,
							    cmd1);

			if (ret < 0) {
				dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
					ret);
				return ret;
			}

			is_first_msg = false;
			ret = wait_for_completion_timeout(&tqspi->xfer_completion,
							  QSPI_DMA_TIMEOUT);

			if (WARN_ON_ONCE(ret == 0)) {
				/*
				 * Check if hardware completed the transfer
				 * even though interrupt was lost or delayed.
				 * If so, process the completion and continue.
				 */
				ret = tegra_qspi_handle_timeout(tqspi);
				if (ret < 0) {
					/* Real timeout - clean up and fail */
					dev_err(tqspi->dev, "transfer timeout\n");

					/* Abort transfer by resetting pio/dma bit */
					if (tqspi->is_curr_dma_xfer)
						tegra_qspi_dma_stop(tqspi);
					else
						tegra_qspi_pio_stop(tqspi);

					/* Reset controller if timeout happens */
					tegra_qspi_reset(tqspi);

					ret = -EIO;
					goto exit;
				}
			}

			if (tqspi->tx_status || tqspi->rx_status) {
				dev_err(tqspi->dev, "QSPI Transfer failed\n");
				tqspi->tx_status = 0;
				tqspi->rx_status = 0;
				ret = -EIO;
				goto exit;
			}
			break;
		default:
			ret = -EINVAL;
			goto exit;
		}
		msg->actual_length += xfer->len;
		if (!xfer->cs_change && transfer_phase == DATA_TRANSFER) {
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
		}
		tqspi->curr_xfer = NULL;
		transfer_phase++;
	}
	ret = 0;

exit:
	tqspi->curr_xfer = NULL;
	msg->status = ret;

	return ret;
}

static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
					    struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *transfer;
	bool is_first_msg = true;
	int ret = 0, val = 0;

	msg->status = 0;
	msg->actual_length = 0;
	tqspi->tx_status = 0;
	tqspi->rx_status = 0;

	/* Disable Combined sequence mode */
	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
	val &= ~QSPI_CMB_SEQ_EN;
	if (tqspi->soc_data->supports_tpm)
		val &= ~QSPI_TPM_WAIT_POLL_EN;
	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		struct spi_transfer *xfer = transfer;
		u8 dummy_bytes = 0;
		u32 cmd1;

		tqspi->dummy_cycles = 0;
		/*
		 * Tegra QSPI hardware supports dummy bytes transfer after actual transfer
		 * bytes based on programmed dummy clock cycles in the QSPI_MISC register.
		 * So, check if the next transfer is dummy data transfer and program dummy
		 * clock cycles along with the current transfer and skip next transfer.
		 */
		if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
			struct spi_transfer *next_xfer;

			next_xfer = list_next_entry(xfer, transfer_list);
			if (next_xfer->dummy_data) {
				u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;

				if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
					tqspi->dummy_cycles = dummy_cycles;
					dummy_bytes = next_xfer->len;
					transfer = next_xfer;
				}
			}
		}

		reinit_completion(&tqspi->xfer_completion);

		cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);

		ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
			goto complete_xfer;
		}

		ret = wait_for_completion_timeout(&tqspi->xfer_completion,
						  QSPI_DMA_TIMEOUT);
		if (WARN_ON(ret == 0)) {
			/*
			 * Check if hardware completed the transfer even though
			 * interrupt was lost or delayed. If so, process the
			 * completion and continue.
			 */
			ret = tegra_qspi_handle_timeout(tqspi);
			if (ret < 0) {
				/* Real timeout - clean up and fail */
				dev_err(tqspi->dev, "transfer timeout\n");

				if (tqspi->is_curr_dma_xfer)
					tegra_qspi_dma_stop(tqspi);

				tegra_qspi_handle_error(tqspi);
				ret = -EIO;
				goto complete_xfer;
			}
		}

		if (tqspi->tx_status || tqspi->rx_status) {
			tegra_qspi_handle_error(tqspi);
			ret = -EIO;
			goto complete_xfer;
		}

		msg->actual_length += xfer->len + dummy_bytes;

complete_xfer:
		tqspi->curr_xfer = NULL;

		if (ret < 0) {
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
			goto exit;
		}

		if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
			/* de-activate CS after last transfer only when cs_change is not set */
			if (!xfer->cs_change) {
				tegra_qspi_transfer_end(spi);
				spi_transfer_delay_exec(xfer);
			}
		} else if (xfer->cs_change) {
			/* de-activate CS between the transfers only when cs_change is set */
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
		}
	}

	ret = 0;
exit:
	msg->status = ret;

	return ret;
}

static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
					struct spi_message *msg)
{
	int transfer_count = 0;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		transfer_count++;
	}
	if (!tqspi->soc_data->cmb_xfer_capable)
		return false;
	if (transfer_count > 4 || transfer_count < 3)
		return false;
	xfer = list_first_entry(&msg->transfers, typeof(*xfer),
				transfer_list);
	if (xfer->len > 2)
		return false;
	xfer = list_next_entry(xfer, transfer_list);
	if (xfer->len > 4 || xfer->len < 3)
		return false;
	xfer = list_next_entry(xfer, transfer_list);
	if (transfer_count == 4) {
		if (xfer->dummy_data != 1)
			return false;
		if ((xfer->len * 8 / xfer->tx_nbits) > QSPI_DUMMY_CYCLES_MAX)
			return false;
		xfer = list_next_entry(xfer, transfer_list);
	}
	if (!tqspi->soc_data->has_ext_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
		return false;

	return true;
}
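
/*
 * Top-level message handler: messages that form a valid flash command
 * sequence (on hardware that supports it) take the combined sequence path,
 * everything else goes through the regular per-transfer path.
 */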
static int tegra_qspi_transfer_one_message(struct spi_controller *host,
					   struct spi_message *msg)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
	int ret;

	if (tegra_qspi_validate_cmb_seq(tqspi, msg))
		ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
	else
		ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);

	spi_finalize_current_message(host);

	return ret;
}

static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t = tqspi->curr_xfer;
	unsigned long flags;

	spin_lock_irqsave(&tqspi->lock, flags);

	if (tqspi->tx_status || tqspi->rx_status) {
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	tegra_qspi_start_cpu_based_transfer(tqspi, t);
exit:
	tqspi->curr_xfer = NULL;
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t = tqspi->curr_xfer;
	unsigned int total_fifo_words;
	unsigned long flags;
	long wait_status;
	int num_errors = 0;

	if (tqspi->cur_direction & DATA_DIR_TX) {
		if (tqspi->tx_status) {
			if (tqspi->tx_dma_chan)
				dmaengine_terminate_all(tqspi->tx_dma_chan);
			num_errors++;
		} else if (tqspi->tx_dma_chan) {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->tx_dma_chan);
				dev_err(tqspi->dev, "failed TX DMA transfer\n");
				num_errors++;
			}
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		if (tqspi->rx_status) {
			if (tqspi->rx_dma_chan)
				dmaengine_terminate_all(tqspi->rx_dma_chan);
			num_errors++;
		} else if (tqspi->rx_dma_chan) {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->rx_dma_chan);
				dev_err(tqspi->dev, "failed RX DMA transfer\n");
				num_errors++;
			}
		}
	}

	spin_lock_irqsave(&tqspi->lock, flags);

	if (num_errors) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_dma_unmap_xfer(tqspi, t);

	/* continue transfer in current message */
	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	if (total_fifo_words > QSPI_FIFO_DEPTH)
		num_errors = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		num_errors = tegra_qspi_start_cpu_based_transfer(tqspi, t);

exit:
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
{
	struct tegra_qspi *tqspi = context_data;

	/*
	 * Occasionally the IRQ thread takes a long time to wake up (usually
	 * when the CPU that it's running on is excessively busy) and we have
	 * already reached the timeout before and cleaned up the timed out
	 * transfer. Avoid any processing in that case and bail out early.
	 */
	if (!tqspi->curr_xfer)
		return IRQ_NONE;

	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);

	if (tqspi->cur_direction & DATA_DIR_RX)
		tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);

	tegra_qspi_mask_clear_irq(tqspi);

	if (!tqspi->is_curr_dma_xfer)
		return handle_cpu_based_xfer(tqspi);

	return handle_dma_based_xfer(tqspi);
}

static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
	.has_ext_dma = true,
	.cmb_xfer_capable = false,
	.supports_tpm = false,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
	.has_ext_dma = true,
	.cmb_xfer_capable = true,
	.supports_tpm = false,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
	.has_ext_dma = false,
	.cmb_xfer_capable = true,
	.supports_tpm = true,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
	.has_ext_dma = true,
	.cmb_xfer_capable = true,
	.supports_tpm = true,
	.cs_count = 4,
};

static const struct of_device_id tegra_qspi_of_match[] = {
	{
		.compatible = "nvidia,tegra210-qspi",
		.data = &tegra210_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra186-qspi",
		.data = &tegra186_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra194-qspi",
		.data = &tegra186_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra234-qspi",
		.data = &tegra234_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra241-qspi",
		.data = &tegra241_qspi_soc_data,
	},
	{}
};

MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id tegra_qspi_acpi_match[] = {
	{
		.id = "NVDA1213",
		.driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
	}, {
		.id = "NVDA1313",
		.driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
	}, {
		.id = "NVDA1413",
		.driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
	}, {
		.id = "NVDA1513",
		.driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
	},
	{}
};

MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
#endif
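
/*
 * Illustrative device tree fragment only (assumed layout, not taken from a
 * real board file; see the binding document for the authoritative format):
 *
 *	spi@... {
 *		compatible = "nvidia,tegra186-qspi";
 *		clock-names = "qspi";
 *		dma-names = "rx", "tx";
 *
 *		flash@0 {
 *			reg = <0>;
 *			nvidia,tx-clk-tap-delay = <0>;
 *			nvidia,rx-clk-tap-delay = <0>;
 *		};
 *	};
 *
 * An "spi" alias, if present, fixes the bus number via of_alias_get_id().
 */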
static int tegra_qspi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct tegra_qspi *tqspi;
	struct resource *r;
	int ret, qspi_irq;
	int bus_num;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*tqspi));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);
	tqspi = spi_controller_get_devdata(host);

	host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
			  SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
	host->flags = SPI_CONTROLLER_HALF_DUPLEX;
	host->setup = tegra_qspi_setup;
	host->transfer_one_message = tegra_qspi_transfer_one_message;
	host->num_chipselect = 1;
	host->auto_runtime_pm = true;

	bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
	if (bus_num >= 0)
		host->bus_num = bus_num;

	tqspi->host = host;
	tqspi->dev = &pdev->dev;
	spin_lock_init(&tqspi->lock);

	tqspi->soc_data = device_get_match_data(&pdev->dev);
	host->num_chipselect = tqspi->soc_data->cs_count;
	tqspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
	if (IS_ERR(tqspi->base))
		return PTR_ERR(tqspi->base);

	tqspi->phys = r->start;
	qspi_irq = platform_get_irq(pdev, 0);
	if (qspi_irq < 0)
		return qspi_irq;
	tqspi->irq = qspi_irq;

	if (!has_acpi_companion(tqspi->dev)) {
		tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
		if (IS_ERR(tqspi->clk)) {
			ret = PTR_ERR(tqspi->clk);
			dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
			return ret;
		}
	}

	tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
	tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;

	ret = tegra_qspi_init_dma(tqspi);
	if (ret < 0)
		return ret;

	if (tqspi->use_dma)
		tqspi->max_buf_size = tqspi->dma_buf_size;

	init_completion(&tqspi->tx_dma_complete);
	init_completion(&tqspi->rx_dma_complete);
	init_completion(&tqspi->xfer_completion);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
		goto exit_pm_disable;
	}

	if (device_reset(tqspi->dev) < 0)
		dev_warn_once(tqspi->dev, "device reset failed\n");

	tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
	tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
	tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
	tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);

	pm_runtime_put(&pdev->dev);

	ret = request_threaded_irq(tqspi->irq, NULL,
				   tegra_qspi_isr_thread, IRQF_ONESHOT,
				   dev_name(&pdev->dev), tqspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
		goto exit_pm_disable;
	}

	host->dev.of_node = pdev->dev.of_node;
	ret = spi_register_controller(host);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register host: %d\n", ret);
		goto exit_free_irq;
	}

	return 0;

exit_free_irq:
	free_irq(qspi_irq, tqspi);
exit_pm_disable:
	pm_runtime_force_suspend(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);
	return ret;
}

static void tegra_qspi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);

	spi_unregister_controller(host);
	free_irq(tqspi->irq, tqspi);
	pm_runtime_force_suspend(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);
}

static int __maybe_unused tegra_qspi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);

	return spi_controller_suspend(host);
}

static int __maybe_unused tegra_qspi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
	pm_runtime_put(dev);

	return spi_controller_resume(host);
}

static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);

	/* Runtime pm disabled with ACPI */
	if (has_acpi_companion(tqspi->dev))
		return 0;
	/* flush all writes which are in the PPSB queue by reading back */
	tegra_qspi_readl(tqspi, QSPI_COMMAND1);

	clk_disable_unprepare(tqspi->clk);

	return 0;
}

static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
	int ret;

	/* Runtime pm disabled with ACPI */
	if (has_acpi_companion(tqspi->dev))
		return 0;
	ret = clk_prepare_enable(tqspi->clk);
	if (ret < 0)
		dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);

	return ret;
}

static const struct dev_pm_ops tegra_qspi_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
};

static struct platform_driver tegra_qspi_driver = {
	.driver = {
		.name = "tegra-qspi",
		.pm = &tegra_qspi_pm_ops,
		.of_match_table = tegra_qspi_of_match,
		.acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
	},
	.probe = tegra_qspi_probe,
	.remove = tegra_qspi_remove,
};
module_platform_driver(tegra_qspi_driver);

MODULE_ALIAS("platform:qspi-tegra");
MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
MODULE_LICENSE("GPL v2");