// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 NVIDIA CORPORATION.

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
#include <linux/property.h>
#include <linux/sizes.h>

#define QSPI_COMMAND1				0x000
#define QSPI_BIT_LENGTH(x)			(((x) & 0x1f) << 0)
#define QSPI_PACKED				BIT(5)
#define QSPI_INTERFACE_WIDTH_MASK		(0x03 << 7)
#define QSPI_INTERFACE_WIDTH(x)			(((x) & 0x03) << 7)
#define QSPI_INTERFACE_WIDTH_SINGLE		QSPI_INTERFACE_WIDTH(0)
#define QSPI_INTERFACE_WIDTH_DUAL		QSPI_INTERFACE_WIDTH(1)
#define QSPI_INTERFACE_WIDTH_QUAD		QSPI_INTERFACE_WIDTH(2)
#define QSPI_SDR_DDR_SEL			BIT(9)
#define QSPI_TX_EN				BIT(11)
#define QSPI_RX_EN				BIT(12)
#define QSPI_CS_SW_VAL				BIT(20)
#define QSPI_CS_SW_HW				BIT(21)

#define QSPI_CS_POL_INACTIVE(n)			(1 << (22 + (n)))
#define QSPI_CS_POL_INACTIVE_MASK		(0xF << 22)
#define QSPI_CS_SEL_0				(0 << 26)
#define QSPI_CS_SEL_1				(1 << 26)
#define QSPI_CS_SEL_2				(2 << 26)
#define QSPI_CS_SEL_3				(3 << 26)
#define QSPI_CS_SEL_MASK			(3 << 26)
#define QSPI_CS_SEL(x)				(((x) & 0x3) << 26)

#define QSPI_CONTROL_MODE_0			(0 << 28)
#define QSPI_CONTROL_MODE_3			(3 << 28)
#define QSPI_CONTROL_MODE_MASK			(3 << 28)
#define QSPI_M_S				BIT(30)
#define QSPI_PIO				BIT(31)

#define QSPI_COMMAND2				0x004
#define QSPI_TX_TAP_DELAY(x)			(((x) & 0x3f) << 10)
#define QSPI_RX_TAP_DELAY(x)			(((x) & 0xff) << 0)

#define QSPI_CS_TIMING1				0x008
#define QSPI_SETUP_HOLD(setup, hold)		(((setup) << 4) | (hold))

#define QSPI_CS_TIMING2				0x00c
#define CYCLES_BETWEEN_PACKETS_0(x)		(((x) & 0x1f) << 0)
#define CS_ACTIVE_BETWEEN_PACKETS_0		BIT(5)

#define QSPI_TRANS_STATUS			0x010
#define QSPI_BLK_CNT(val)			(((val) >> 0) & 0xffff)
#define QSPI_RDY				BIT(30)

#define QSPI_FIFO_STATUS			0x014
#define QSPI_RX_FIFO_EMPTY			BIT(0)
#define QSPI_RX_FIFO_FULL			BIT(1)
#define QSPI_TX_FIFO_EMPTY			BIT(2)
#define QSPI_TX_FIFO_FULL			BIT(3)
#define QSPI_RX_FIFO_UNF			BIT(4)
#define QSPI_RX_FIFO_OVF			BIT(5)
#define QSPI_TX_FIFO_UNF			BIT(6)
#define QSPI_TX_FIFO_OVF			BIT(7)
#define QSPI_ERR				BIT(8)
#define QSPI_TX_FIFO_FLUSH			BIT(14)
#define QSPI_RX_FIFO_FLUSH			BIT(15)
#define QSPI_TX_FIFO_EMPTY_COUNT(val)		(((val) >> 16) & 0x7f)
#define QSPI_RX_FIFO_FULL_COUNT(val)		(((val) >> 23) & 0x7f)

#define QSPI_FIFO_ERROR				(QSPI_RX_FIFO_UNF | \
						 QSPI_RX_FIFO_OVF | \
						 QSPI_TX_FIFO_UNF | \
						 QSPI_TX_FIFO_OVF)
#define QSPI_FIFO_EMPTY				(QSPI_RX_FIFO_EMPTY | \
						 QSPI_TX_FIFO_EMPTY)

#define QSPI_TX_DATA				0x018
#define QSPI_RX_DATA				0x01c

#define QSPI_DMA_CTL				0x020
#define QSPI_TX_TRIG(n)				(((n) & 0x3) << 15)
#define QSPI_TX_TRIG_1				QSPI_TX_TRIG(0)
#define QSPI_TX_TRIG_4				QSPI_TX_TRIG(1)
#define QSPI_TX_TRIG_8				QSPI_TX_TRIG(2)
#define QSPI_TX_TRIG_16				QSPI_TX_TRIG(3)

#define QSPI_RX_TRIG(n)				(((n) & 0x3) << 19)
#define QSPI_RX_TRIG_1				QSPI_RX_TRIG(0)
#define QSPI_RX_TRIG_4				QSPI_RX_TRIG(1)
#define QSPI_RX_TRIG_8				QSPI_RX_TRIG(2)
#define QSPI_RX_TRIG_16				QSPI_RX_TRIG(3)
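
/*
 * The TX/RX trigger levels select the FIFO attention level (in FIFO
 * words) at which the controller asserts its DMA request. The driver
 * pairs the trigger level with a matching DMA burst size in
 * tegra_qspi_start_dma_based_transfer(): e.g. a 64-byte chunk uses
 * TRIG_8 with 8-word bursts, while a chunk that is not a multiple of
 * 16 bytes falls back to TRIG_1 with single-word bursts.
 */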

#define QSPI_DMA_EN				BIT(31)

#define QSPI_DMA_BLK				0x024
#define QSPI_DMA_BLK_SET(x)			(((x) & 0xffff) << 0)

#define QSPI_DMA_MEM_ADDRESS			0x028
#define QSPI_DMA_HI_ADDRESS			0x02c

#define QSPI_TX_FIFO				0x108
#define QSPI_RX_FIFO				0x188

#define QSPI_FIFO_DEPTH				64

#define QSPI_INTR_MASK				0x18c
#define QSPI_INTR_RX_FIFO_UNF_MASK		BIT(25)
#define QSPI_INTR_RX_FIFO_OVF_MASK		BIT(26)
#define QSPI_INTR_TX_FIFO_UNF_MASK		BIT(27)
#define QSPI_INTR_TX_FIFO_OVF_MASK		BIT(28)
#define QSPI_INTR_RDY_MASK			BIT(29)
#define QSPI_INTR_RX_TX_FIFO_ERR		(QSPI_INTR_RX_FIFO_UNF_MASK | \
						 QSPI_INTR_RX_FIFO_OVF_MASK | \
						 QSPI_INTR_TX_FIFO_UNF_MASK | \
						 QSPI_INTR_TX_FIFO_OVF_MASK)

#define QSPI_MISC_REG				0x194
#define QSPI_NUM_DUMMY_CYCLE(x)			(((x) & 0xff) << 0)
#define QSPI_DUMMY_CYCLES_MAX			0xff

#define QSPI_CMB_SEQ_CMD			0x19c
#define QSPI_COMMAND_VALUE_SET(x)		(((x) & 0xFF) << 0)

#define QSPI_CMB_SEQ_CMD_CFG			0x1a0
#define QSPI_COMMAND_X1_X2_X4(x)		((((x) >> 1) & 0x3) << 13)
#define QSPI_COMMAND_X1_X2_X4_MASK		(0x03 << 13)
#define QSPI_COMMAND_SDR_DDR			BIT(12)
#define QSPI_COMMAND_SIZE_SET(x)		(((x) & 0xFF) << 0)

#define QSPI_GLOBAL_CONFIG			0x1a4
#define QSPI_CMB_SEQ_EN				BIT(0)
#define QSPI_TPM_WAIT_POLL_EN			BIT(1)

#define QSPI_CMB_SEQ_ADDR			0x1a8
#define QSPI_ADDRESS_VALUE_SET(x)		(((x) & 0xFFFF) << 0)

#define QSPI_CMB_SEQ_ADDR_CFG			0x1ac
#define QSPI_ADDRESS_X1_X2_X4(x)		((((x) >> 1) & 0x3) << 13)
#define QSPI_ADDRESS_X1_X2_X4_MASK		(0x03 << 13)
#define QSPI_ADDRESS_SDR_DDR			BIT(12)
#define QSPI_ADDRESS_SIZE_SET(x)		(((x) & 0xFF) << 0)

#define DATA_DIR_TX				BIT(0)
#define DATA_DIR_RX				BIT(1)

#define QSPI_DMA_TIMEOUT			(msecs_to_jiffies(1000))
#define DEFAULT_QSPI_DMA_BUF_LEN		SZ_64K

enum tegra_qspi_transfer_type {
	CMD_TRANSFER = 0,
	ADDR_TRANSFER = 1,
	DUMMY_TRANSFER = 2,
	DATA_TRANSFER = 3
};

struct tegra_qspi_soc_data {
	bool cmb_xfer_capable;
	bool supports_tpm;
	bool has_ext_dma;
	unsigned int cs_count;
};

struct tegra_qspi_client_data {
	u32 tx_clk_tap_delay;
	u32 rx_clk_tap_delay;
};
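
/*
 * Per-controller driver state. One instance exists per QSPI controller;
 * it is stored as the spi_controller driver data and is shared between
 * process context and the threaded interrupt handler, hence the lock.
 */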
struct tegra_qspi {
	struct device				*dev;
	struct spi_controller			*host;
	/* lock to protect data accessed by irq */
	spinlock_t				lock;

	struct clk				*clk;
	void __iomem				*base;
	phys_addr_t				phys;
	unsigned int				irq;

	u32					cur_speed;
	unsigned int				cur_pos;
	unsigned int				words_per_32bit;
	unsigned int				bytes_per_word;
	unsigned int				curr_dma_words;
	unsigned int				cur_direction;

	unsigned int				cur_rx_pos;
	unsigned int				cur_tx_pos;

	unsigned int				dma_buf_size;
	unsigned int				max_buf_size;
	bool					is_curr_dma_xfer;

	struct completion			rx_dma_complete;
	struct completion			tx_dma_complete;

	u32					tx_status;
	u32					rx_status;
	u32					status_reg;
	bool					is_packed;
	bool					use_dma;

	u32					command1_reg;
	u32					dma_control_reg;
	u32					def_command1_reg;
	u32					def_command2_reg;
	u32					spi_cs_timing1;
	u32					spi_cs_timing2;
	u8					dummy_cycles;

	struct completion			xfer_completion;
	struct spi_transfer			*curr_xfer;

	struct dma_chan				*rx_dma_chan;
	u32					*rx_dma_buf;
	dma_addr_t				rx_dma_phys;
	struct dma_async_tx_descriptor		*rx_dma_desc;

	struct dma_chan				*tx_dma_chan;
	u32					*tx_dma_buf;
	dma_addr_t				tx_dma_phys;
	struct dma_async_tx_descriptor		*tx_dma_desc;
	const struct tegra_qspi_soc_data	*soc_data;
};

static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
{
	return readl(tqspi->base + offset);
}

static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
{
	writel(value, tqspi->base + offset);

	/* read back register to make sure that register writes completed */
	if (offset != QSPI_TX_FIFO)
		readl(tqspi->base + QSPI_COMMAND1);
}

static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
{
	u32 value;

	/* write 1 to clear status register */
	value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
	tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);

	value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	if (!(value & QSPI_INTR_RDY_MASK)) {
		value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
		tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
	}

	/* clear fifo status error if any */
	value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if (value & QSPI_ERR)
		tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
}

static unsigned int
tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int max_word, max_len, total_fifo_words;
	unsigned int remain_len = t->len - tqspi->cur_pos;
	unsigned int bits_per_word = t->bits_per_word;

	tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);

	/*
	 * Tegra QSPI controller supports packed or unpacked mode transfers.
	 * Packed mode is used for data transfers using 8, 16, or 32 bits per
	 * word with a minimum transfer of 1 word and for all other transfers
	 * unpacked mode will be used.
	 */
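
	/*
	 * e.g. bits_per_word == 8 with a 10-byte transfer selects packed
	 * mode with four 8-bit packets per FIFO word, while
	 * bits_per_word == 24 selects unpacked mode where each FIFO word
	 * carries a single 24-bit packet in its low bits.
	 */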

	if ((bits_per_word == 8 || bits_per_word == 16 ||
	     bits_per_word == 32) && t->len > 3) {
		tqspi->is_packed = true;
		tqspi->words_per_32bit = 32 / bits_per_word;
	} else {
		tqspi->is_packed = false;
		tqspi->words_per_32bit = 1;
	}

	if (tqspi->is_packed) {
		max_len = min(remain_len, tqspi->max_buf_size);
		tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
		total_fifo_words = (max_len + 3) / 4;
	} else {
		max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
		max_word = min(max_word, tqspi->max_buf_size / 4);
		tqspi->curr_dma_words = max_word;
		total_fifo_words = max_word;
	}

	return total_fifo_words;
}

static unsigned int
tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int written_words, fifo_words_left, count;
	unsigned int len, tx_empty_count, max_n_32bit, i;
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u32 fifo_status;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tqspi->is_packed) {
		fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
		written_words = min(fifo_words_left, tqspi->curr_dma_words);
		len = written_words * tqspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(len, 4);
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; (i < 4) && len; i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
	} else {
		unsigned int write_bytes;
		u8 bytes_per_word = tqspi->bytes_per_word;

		max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
		written_words = max_n_32bit;
		len = written_words * tqspi->bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		write_bytes = len;
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; len && (i < min(4, bytes_per_word)); i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += write_bytes;
	}

	return written_words;
}

static unsigned int
tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len, rx_full_count, count, i;
	unsigned int read_words = 0;
	u32 fifo_status, x;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
	if (tqspi->is_packed) {
		len = tqspi->curr_dma_words * tqspi->bytes_per_word;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);

			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		read_words += tqspi->curr_dma_words;
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		u8 bytes_per_word = tqspi->bytes_per_word;
		unsigned int read_bytes;

		len = rx_full_count * bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		read_bytes = len;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		read_words += rx_full_count;
		tqspi->cur_rx_pos += read_bytes;
	}

	return read_words;
}
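
/*
 * The copy helpers below shuttle data between the client buffer and the
 * driver's bounce buffers for unpacked-mode DMA: dma_sync_single_for_cpu()
 * hands the bounce buffer to the CPU before it is touched and
 * dma_sync_single_for_device() hands it back before the DMA engine runs.
 * In packed mode the client buffer is mapped for DMA directly, so only
 * the progress counters are updated here.
 */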
static void
tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
				tqspi->dma_buf_size, DMA_TO_DEVICE);

	/*
	 * In packed mode, each word in FIFO may contain multiple packets
	 * based on bits per word. So all bytes in each FIFO word are valid.
	 *
	 * In unpacked mode, each word in FIFO contains a single packet and
	 * based on bits per word any remaining bits in a FIFO word will be
	 * ignored by the hardware and are invalid bits.
	 */
	if (tqspi->is_packed) {
		tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
		unsigned int i, count, consume, write_bytes;

		/*
		 * Fill tx_dma_buf to contain a single packet in each word
		 * based on bits per word from the SPI core tx_buf.
		 */
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		write_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = 0;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tqspi->tx_dma_buf[count] = x;
		}

		tqspi->cur_tx_pos += write_bytes;
	}

	dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
				   tqspi->dma_buf_size, DMA_TO_DEVICE);
}

static void
tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
				tqspi->dma_buf_size, DMA_FROM_DEVICE);

	if (tqspi->is_packed) {
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		unsigned int i, count, consume, read_bytes;

		/*
		 * Each FIFO word contains a single data packet.
		 * Skip invalid bits in each FIFO word based on bits per word
		 * and align bytes while filling in SPI core rx_buf.
		 */
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		read_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = tqspi->rx_dma_buf[count] & rx_mask;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		tqspi->cur_rx_pos += read_bytes;
	}

	dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
				   tqspi->dma_buf_size, DMA_FROM_DEVICE);
}

static void tegra_qspi_dma_complete(void *args)
{
	struct completion *dma_complete = args;

	complete(dma_complete);
}

static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t tx_dma_phys;

	reinit_completion(&tqspi->tx_dma_complete);

	if (tqspi->is_packed)
		tx_dma_phys = t->tx_dma;
	else
		tx_dma_phys = tqspi->tx_dma_phys;

	tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
							 len, DMA_MEM_TO_DEV,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->tx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get TX descriptor\n");
		return -EIO;
	}

	tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
	dmaengine_submit(tqspi->tx_dma_desc);
	dma_async_issue_pending(tqspi->tx_dma_chan);

	return 0;
}

static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t rx_dma_phys;

	reinit_completion(&tqspi->rx_dma_complete);

	if (tqspi->is_packed)
		rx_dma_phys = t->rx_dma;
	else
		rx_dma_phys = tqspi->rx_dma_phys;

	tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
							 len, DMA_DEV_TO_MEM,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->rx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get RX descriptor\n");
		return -EIO;
	}

	tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
	dmaengine_submit(tqspi->rx_dma_desc);
	dma_async_issue_pending(tqspi->rx_dma_chan);

	return 0;
}
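
/*
 * Flush both FIFOs and poll FIFO_STATUS for up to one second until they
 * drain. The atomic variant polls without sleeping and is safe under the
 * controller spinlock, which the error handling paths may hold.
 */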
static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
{
	void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
	u32 val;

	val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
		return 0;

	val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
	tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);

	if (!atomic)
		return readl_relaxed_poll_timeout(addr, val,
						  (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						  1000, 1000000);

	return readl_relaxed_poll_timeout_atomic(addr, val,
						 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						 1000, 1000000);
}

static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
{
	u32 intr_mask;

	intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
	tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
}

static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	if (t->tx_buf) {
		t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->tx_dma))
			return -ENOMEM;
	}

	if (t->rx_buf) {
		t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
			/* only unmap the TX buffer if it was mapped above */
			if (t->tx_buf)
				dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	if (t->tx_buf)
		dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
	if (t->rx_buf)
		dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}
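
/*
 * Start one DMA chunk. Packed transfers DMA straight to/from the client
 * buffer mapped by tegra_qspi_dma_map_xfer(); unpacked transfers go
 * through the bounce buffers. The length is rounded up to a multiple of
 * 4 bytes because the FIFOs are one word wide, e.g. 10 payload bytes
 * move as three FIFO words (12 bytes).
 */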
static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	struct dma_slave_config dma_sconfig = { 0 };
	dma_addr_t rx_dma_phys, tx_dma_phys;
	unsigned int len;
	u8 dma_burst;
	int ret = 0;
	u32 val;

	if (tqspi->is_packed) {
		ret = tegra_qspi_dma_map_xfer(tqspi, t);
		if (ret < 0)
			return ret;
	}

	val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
	tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(tqspi);

	if (tqspi->is_packed)
		len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
	else
		len = tqspi->curr_dma_words * 4;

	/* set attention level based on length of transfer */
	if (tqspi->soc_data->has_ext_dma) {
		val = 0;
		if (len & 0xf) {
			val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
			dma_burst = 1;
		} else if (((len) >> 4) & 0x1) {
			val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
			dma_burst = 4;
		} else {
			val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
			dma_burst = 8;
		}

		tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
	}

	tqspi->dma_control_reg = val;

	dma_sconfig.device_fc = true;

	if (tqspi->cur_direction & DATA_DIR_TX) {
		if (tqspi->tx_dma_chan) {
			dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
			dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			dma_sconfig.dst_maxburst = dma_burst;
			ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
			if (ret < 0) {
				dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
				return ret;
			}

			tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
			ret = tegra_qspi_start_tx_dma(tqspi, t, len);
			if (ret < 0) {
				dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
				return ret;
			}
		} else {
			if (tqspi->is_packed)
				tx_dma_phys = t->tx_dma;
			else
				tx_dma_phys = tqspi->tx_dma_phys;
			tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
			tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys),
					  QSPI_DMA_MEM_ADDRESS);
			tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff),
					  QSPI_DMA_HI_ADDRESS);
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		if (tqspi->rx_dma_chan) {
			dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
			dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			dma_sconfig.src_maxburst = dma_burst;
			ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
			if (ret < 0) {
				dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
				return ret;
			}

			dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
						   tqspi->dma_buf_size, DMA_FROM_DEVICE);
			ret = tegra_qspi_start_rx_dma(tqspi, t, len);
			if (ret < 0) {
				dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
				if (tqspi->cur_direction & DATA_DIR_TX)
					dmaengine_terminate_all(tqspi->tx_dma_chan);
				return ret;
			}
		} else {
			if (tqspi->is_packed)
				rx_dma_phys = t->rx_dma;
			else
				rx_dma_phys = tqspi->rx_dma_phys;

			tegra_qspi_writel(tqspi, lower_32_bits(rx_dma_phys),
					  QSPI_DMA_MEM_ADDRESS);
			tegra_qspi_writel(tqspi, (upper_32_bits(rx_dma_phys) & 0xff),
					  QSPI_DMA_HI_ADDRESS);
		}
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);

	tqspi->is_curr_dma_xfer = true;
	tqspi->dma_control_reg = val;
	val |= QSPI_DMA_EN;
	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);

	return ret;
}
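
/*
 * PIO path: prime the TX FIFO from the client buffer (RX data is drained
 * by the interrupt handler), program the block count and set QSPI_PIO to
 * kick off the transfer.
 */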
static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
{
	u32 val;
	unsigned int cur_words;

	if (qspi->cur_direction & DATA_DIR_TX)
		cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
	else
		cur_words = qspi->curr_dma_words;

	val = QSPI_DMA_BLK_SET(cur_words - 1);
	tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(qspi);

	qspi->is_curr_dma_xfer = false;
	val = qspi->command1_reg;
	val |= QSPI_PIO;
	tegra_qspi_writel(qspi, val, QSPI_COMMAND1);

	return 0;
}

static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
	if (tqspi->tx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
		tqspi->tx_dma_buf = NULL;
	}

	if (tqspi->tx_dma_chan) {
		dma_release_channel(tqspi->tx_dma_chan);
		tqspi->tx_dma_chan = NULL;
	}

	if (tqspi->rx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->rx_dma_buf, tqspi->rx_dma_phys);
		tqspi->rx_dma_buf = NULL;
	}

	if (tqspi->rx_dma_chan) {
		dma_release_channel(tqspi->rx_dma_chan);
		tqspi->rx_dma_chan = NULL;
	}
}

static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
{
	struct dma_chan *dma_chan;
	dma_addr_t dma_phys;
	u32 *dma_buf;
	int err;

	if (tqspi->soc_data->has_ext_dma) {
		dma_chan = dma_request_chan(tqspi->dev, "rx");
		if (IS_ERR(dma_chan)) {
			err = PTR_ERR(dma_chan);
			goto err_out;
		}

		tqspi->rx_dma_chan = dma_chan;

		dma_chan = dma_request_chan(tqspi->dev, "tx");
		if (IS_ERR(dma_chan)) {
			err = PTR_ERR(dma_chan);
			goto err_out;
		}

		tqspi->tx_dma_chan = dma_chan;
	} else {
		if (!device_iommu_mapped(tqspi->dev)) {
			dev_warn(tqspi->dev,
				 "IOMMU not enabled in device-tree, falling back to PIO mode\n");
			return 0;
		}
	}

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->rx_dma_buf = dma_buf;
	tqspi->rx_dma_phys = dma_phys;

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->tx_dma_buf = dma_buf;
	tqspi->tx_dma_phys = dma_phys;
	tqspi->use_dma = true;

	return 0;

err_out:
	tegra_qspi_deinit_dma(tqspi);

	if (err != -EPROBE_DEFER) {
		dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
		dev_err(tqspi->dev, "falling back to PIO\n");
		return 0;
	}

	return err;
}
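
/*
 * Compute the COMMAND1 value for one transfer. On the first transfer of
 * a message this also programs the clock rate, the SPI mode, the
 * software chip select and the per-device TX/RX tap delays taken from
 * controller_data.
 */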
static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
					 bool is_first_of_msg)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	u32 command1, command2, speed = t->speed_hz;
	u8 bits_per_word = t->bits_per_word;
	u32 tx_tap = 0, rx_tap = 0;
	int req_mode;

	if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
		clk_set_rate(tqspi->clk, speed);
		tqspi->cur_speed = speed;
	}

	tqspi->cur_pos = 0;
	tqspi->cur_rx_pos = 0;
	tqspi->cur_tx_pos = 0;
	tqspi->curr_xfer = t;

	if (is_first_of_msg) {
		tegra_qspi_mask_clear_irq(tqspi);

		command1 = tqspi->def_command1_reg;
		command1 |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);

		command1 &= ~QSPI_CONTROL_MODE_MASK;
		req_mode = spi->mode & 0x3;
		if (req_mode == SPI_MODE_3)
			command1 |= QSPI_CONTROL_MODE_3;
		else
			command1 |= QSPI_CONTROL_MODE_0;

		if (spi->mode & SPI_CS_HIGH)
			command1 |= QSPI_CS_SW_VAL;
		else
			command1 &= ~QSPI_CS_SW_VAL;
		tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

		if (cdata && cdata->tx_clk_tap_delay)
			tx_tap = cdata->tx_clk_tap_delay;

		if (cdata && cdata->rx_clk_tap_delay)
			rx_tap = cdata->rx_clk_tap_delay;

		command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
		if (command2 != tqspi->def_command2_reg)
			tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);

	} else {
		command1 = tqspi->command1_reg;
		command1 &= ~QSPI_BIT_LENGTH(~0);
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
	}

	command1 &= ~QSPI_SDR_DDR_SEL;

	return command1;
}

static int tegra_qspi_start_transfer_one(struct spi_device *spi,
					 struct spi_transfer *t, u32 command1)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	unsigned int total_fifo_words;
	u8 bus_width = 0;
	int ret;

	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);

	command1 &= ~QSPI_PACKED;
	if (tqspi->is_packed)
		command1 |= QSPI_PACKED;
	tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

	tqspi->cur_direction = 0;

	command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
	if (t->rx_buf) {
		command1 |= QSPI_RX_EN;
		tqspi->cur_direction |= DATA_DIR_RX;
		bus_width = t->rx_nbits;
	}

	if (t->tx_buf) {
		command1 |= QSPI_TX_EN;
		tqspi->cur_direction |= DATA_DIR_TX;
		bus_width = t->tx_nbits;
	}

	command1 &= ~QSPI_INTERFACE_WIDTH_MASK;

	if (bus_width == SPI_NBITS_QUAD)
		command1 |= QSPI_INTERFACE_WIDTH_QUAD;
	else if (bus_width == SPI_NBITS_DUAL)
		command1 |= QSPI_INTERFACE_WIDTH_DUAL;
	else
		command1 |= QSPI_INTERFACE_WIDTH_SINGLE;

	tqspi->command1_reg = command1;

	tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);

	ret = tegra_qspi_flush_fifos(tqspi, false);
	if (ret < 0)
		return ret;

	if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
		ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);

	return ret;
}

static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
	struct tegra_qspi_client_data *cdata;
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);

	cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
	if (!cdata)
		return NULL;

	device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
				 &cdata->tx_clk_tap_delay);
	device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
				 &cdata->rx_clk_tap_delay);

	return cdata;
}
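
/*
 * Per-device setup: cache the tap-delay properties and program the
 * inactive chip-select polarity. With SPI_CS_HIGH the line idles low,
 * so the matching QSPI_CS_POL_INACTIVE bit is cleared.
 */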
static int tegra_qspi_setup(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	unsigned long flags;
	u32 val;
	int ret;

	ret = pm_runtime_resume_and_get(tqspi->dev);
	if (ret < 0) {
		dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	if (!cdata) {
		cdata = tegra_qspi_parse_cdata_dt(spi);
		spi->controller_data = cdata;
	}
	spin_lock_irqsave(&tqspi->lock, flags);

	/* keep default cs state inactive */
	val = tqspi->def_command1_reg;
	val |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
	if (spi->mode & SPI_CS_HIGH)
		val &= ~QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
	else
		val |= QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));

	tqspi->def_command1_reg = val;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);

	spin_unlock_irqrestore(&tqspi->lock, flags);

	pm_runtime_put(tqspi->dev);

	return 0;
}

static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
{
	dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
	dev_dbg(tqspi->dev, "Command1:    0x%08x | Command2:    0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_COMMAND1),
		tegra_qspi_readl(tqspi, QSPI_COMMAND2));
	dev_dbg(tqspi->dev, "DMA_CTL:     0x%08x | DMA_BLK:     0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
		tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
	dev_dbg(tqspi->dev, "INTR_MASK:   0x%08x | MISC:        0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
		tegra_qspi_readl(tqspi, QSPI_MISC_REG));
	dev_dbg(tqspi->dev, "TRANS_STAT:  0x%08x | FIFO_STATUS: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
		tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
}

static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
{
	dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
	tegra_qspi_dump_regs(tqspi);
	tegra_qspi_flush_fifos(tqspi, true);
	if (device_reset(tqspi->dev) < 0)
		dev_warn_once(tqspi->dev, "device reset failed\n");
}

static void tegra_qspi_transfer_end(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;

	if (cs_val)
		tqspi->command1_reg |= QSPI_CS_SW_VAL;
	else
		tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
}
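
/*
 * Combined-sequence helpers: encode the bus width, SDR/DDR selection and
 * length (in bits, minus one) of the command and address phases, e.g. a
 * 3-byte X1 SDR address yields QSPI_ADDRESS_SIZE_SET(23).
 */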
static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
{
	u32 cmd_config = 0;

	/* Extract Command configuration and value */
	if (is_ddr)
		cmd_config |= QSPI_COMMAND_SDR_DDR;
	else
		cmd_config &= ~QSPI_COMMAND_SDR_DDR;

	cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
	cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);

	return cmd_config;
}

static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
{
	u32 addr_config = 0;

	if (is_ddr)
		addr_config |= QSPI_ADDRESS_SDR_DDR;
	else
		addr_config &= ~QSPI_ADDRESS_SDR_DDR;

	addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
	addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);

	return addr_config;
}
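
/*
 * Combined sequence mode issues the command, address, dummy and data
 * phases of a flash operation as a single hardware sequence: the command
 * and address bytes are latched into the CMB_SEQ registers while walking
 * the message, and the data phase then starts the actual transfer.
 */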
static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
					struct spi_message *msg)
{
	bool is_first_msg = true;
	struct spi_transfer *xfer;
	struct spi_device *spi = msg->spi;
	u8 transfer_phase = 0;
	u32 cmd1 = 0, dma_ctl = 0;
	int ret = 0;
	u32 address_value = 0;
	u32 cmd_config = 0, addr_config = 0;
	u8 cmd_value = 0, val = 0;

	/* Enable Combined sequence mode */
	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
	if (spi->mode & SPI_TPM_HW_FLOW) {
		if (tqspi->soc_data->supports_tpm)
			val |= QSPI_TPM_WAIT_POLL_EN;
		else
			return -EIO;
	}
	val |= QSPI_CMB_SEQ_EN;
	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
	/* Process individual transfer list */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		switch (transfer_phase) {
		case CMD_TRANSFER:
			/* X1 SDR mode */
			cmd_config = tegra_qspi_cmd_config(false, xfer->tx_nbits,
							   xfer->len);
			cmd_value = *((const u8 *)(xfer->tx_buf));
			break;
		case ADDR_TRANSFER:
			/* X1 SDR mode */
			addr_config = tegra_qspi_addr_config(false, xfer->tx_nbits,
							     xfer->len);
			address_value = *((const u32 *)(xfer->tx_buf));
			break;
		case DUMMY_TRANSFER:
			if (xfer->dummy_data) {
				tqspi->dummy_cycles = xfer->len * 8 / xfer->tx_nbits;
				break;
			}
			transfer_phase++;
			fallthrough;
		case DATA_TRANSFER:
			/* Program Command, Address value in register */
			tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
			tegra_qspi_writel(tqspi, address_value,
					  QSPI_CMB_SEQ_ADDR);
			/* Program Command and Address config in register */
			tegra_qspi_writel(tqspi, cmd_config,
					  QSPI_CMB_SEQ_CMD_CFG);
			tegra_qspi_writel(tqspi, addr_config,
					  QSPI_CMB_SEQ_ADDR_CFG);

			reinit_completion(&tqspi->xfer_completion);
			cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
							     is_first_msg);
			ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);

			if (ret < 0) {
				dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
					ret);
				return ret;
			}

			is_first_msg = false;
			ret = wait_for_completion_timeout(&tqspi->xfer_completion,
							  QSPI_DMA_TIMEOUT);

			if (WARN_ON_ONCE(ret == 0)) {
				dev_err_ratelimited(tqspi->dev,
						    "QSPI Transfer failed with timeout\n");
				if (tqspi->is_curr_dma_xfer) {
					if ((tqspi->cur_direction & DATA_DIR_TX) &&
					    tqspi->tx_dma_chan)
						dmaengine_terminate_all(tqspi->tx_dma_chan);
					if ((tqspi->cur_direction & DATA_DIR_RX) &&
					    tqspi->rx_dma_chan)
						dmaengine_terminate_all(tqspi->rx_dma_chan);
				}

				/* Abort transfer by resetting pio/dma bit */
				if (!tqspi->is_curr_dma_xfer) {
					cmd1 = tegra_qspi_readl(tqspi, QSPI_COMMAND1);
					cmd1 &= ~QSPI_PIO;
					tegra_qspi_writel(tqspi, cmd1, QSPI_COMMAND1);
				} else {
					dma_ctl = tegra_qspi_readl(tqspi, QSPI_DMA_CTL);
					dma_ctl &= ~QSPI_DMA_EN;
					tegra_qspi_writel(tqspi, dma_ctl,
							  QSPI_DMA_CTL);
				}

				/* Reset controller if timeout happens */
				if (device_reset(tqspi->dev) < 0)
					dev_warn_once(tqspi->dev,
						      "device reset failed\n");
				ret = -EIO;
				goto exit;
			}

			if (tqspi->tx_status || tqspi->rx_status) {
				dev_err(tqspi->dev, "QSPI Transfer failed\n");
				tqspi->tx_status = 0;
				tqspi->rx_status = 0;
				ret = -EIO;
				goto exit;
			}
			break;
		default:
			ret = -EINVAL;
			goto exit;
		}
		msg->actual_length += xfer->len;
		if (!xfer->cs_change && transfer_phase == DATA_TRANSFER) {
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
		}
		transfer_phase++;
	}
	ret = 0;

exit:
	msg->status = ret;

	return ret;
}
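
/*
 * Fallback path that executes each transfer of the message individually.
 * A dummy transfer immediately following another transfer is folded into
 * it as dummy clock cycles, e.g. one dummy byte on a X1 bus becomes
 * eight dummy cycles in QSPI_MISC_REG.
 */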
static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
					    struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *transfer;
	bool is_first_msg = true;
	int ret = 0, val = 0;

	msg->status = 0;
	msg->actual_length = 0;
	tqspi->tx_status = 0;
	tqspi->rx_status = 0;

	/* Disable Combined sequence mode */
	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
	val &= ~QSPI_CMB_SEQ_EN;
	if (tqspi->soc_data->supports_tpm)
		val &= ~QSPI_TPM_WAIT_POLL_EN;
	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		struct spi_transfer *xfer = transfer;
		u8 dummy_bytes = 0;
		u32 cmd1;

		tqspi->dummy_cycles = 0;
		/*
		 * Tegra QSPI hardware supports dummy bytes transfer after actual transfer
		 * bytes based on programmed dummy clock cycles in the QSPI_MISC register.
		 * So, check if the next transfer is dummy data transfer and program dummy
		 * clock cycles along with the current transfer and skip next transfer.
		 */
		if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
			struct spi_transfer *next_xfer;

			next_xfer = list_next_entry(xfer, transfer_list);
			if (next_xfer->dummy_data) {
				u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;

				if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
					tqspi->dummy_cycles = dummy_cycles;
					dummy_bytes = next_xfer->len;
					transfer = next_xfer;
				}
			}
		}

		reinit_completion(&tqspi->xfer_completion);

		cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);

		ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
			goto complete_xfer;
		}

		ret = wait_for_completion_timeout(&tqspi->xfer_completion,
						  QSPI_DMA_TIMEOUT);
		if (WARN_ON(ret == 0)) {
			dev_err(tqspi->dev, "transfer timeout\n");
			if (tqspi->is_curr_dma_xfer) {
				if ((tqspi->cur_direction & DATA_DIR_TX) && tqspi->tx_dma_chan)
					dmaengine_terminate_all(tqspi->tx_dma_chan);
				if ((tqspi->cur_direction & DATA_DIR_RX) && tqspi->rx_dma_chan)
					dmaengine_terminate_all(tqspi->rx_dma_chan);
			}
			tegra_qspi_handle_error(tqspi);
			ret = -EIO;
			goto complete_xfer;
		}

		if (tqspi->tx_status || tqspi->rx_status) {
			tegra_qspi_handle_error(tqspi);
			ret = -EIO;
			goto complete_xfer;
		}

		msg->actual_length += xfer->len + dummy_bytes;

complete_xfer:
		if (ret < 0) {
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
			goto exit;
		}

		if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
			/* de-activate CS after last transfer only when cs_change is not set */
			if (!xfer->cs_change) {
				tegra_qspi_transfer_end(spi);
				spi_transfer_delay_exec(xfer);
			}
		} else if (xfer->cs_change) {
			/* de-activate CS between transfers only when cs_change is set */
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
		}
	}

	ret = 0;
exit:
	msg->status = ret;

	return ret;
}
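
/*
 * A message qualifies for combined sequence mode only if it is shaped
 * like a flash operation: a command transfer of at most 2 bytes, an
 * address transfer of 3 or 4 bytes, an optional dummy transfer within
 * the supported cycle count, and a data transfer that must fit in the
 * FIFO when no external DMA engine is available.
 */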
static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
					struct spi_message *msg)
{
	int transfer_count = 0;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		transfer_count++;
	}
	if (!tqspi->soc_data->cmb_xfer_capable)
		return false;
	if (transfer_count > 4 || transfer_count < 3)
		return false;
	xfer = list_first_entry(&msg->transfers, typeof(*xfer),
				transfer_list);
	if (xfer->len > 2)
		return false;
	xfer = list_next_entry(xfer, transfer_list);
	if (xfer->len > 4 || xfer->len < 3)
		return false;
	xfer = list_next_entry(xfer, transfer_list);
	if (transfer_count == 4) {
		if (xfer->dummy_data != 1)
			return false;
		if ((xfer->len * 8 / xfer->tx_nbits) > QSPI_DUMMY_CYCLES_MAX)
			return false;
		xfer = list_next_entry(xfer, transfer_list);
	}
	if (!tqspi->soc_data->has_ext_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
		return false;

	return true;
}

static int tegra_qspi_transfer_one_message(struct spi_controller *host,
					   struct spi_message *msg)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
	int ret;

	if (tegra_qspi_validate_cmb_seq(tqspi, msg))
		ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
	else
		ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);

	spi_finalize_current_message(host);

	return ret;
}

static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t = tqspi->curr_xfer;
	unsigned long flags;

	spin_lock_irqsave(&tqspi->lock, flags);

	if (tqspi->tx_status || tqspi->rx_status) {
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	tegra_qspi_start_cpu_based_transfer(tqspi, t);
exit:
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t = tqspi->curr_xfer;
	unsigned int total_fifo_words;
	unsigned long flags;
	long wait_status;
	int num_errors = 0;

	if (tqspi->cur_direction & DATA_DIR_TX) {
		if (tqspi->tx_status) {
			if (tqspi->tx_dma_chan)
				dmaengine_terminate_all(tqspi->tx_dma_chan);
			num_errors++;
		} else if (tqspi->tx_dma_chan) {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->tx_dma_chan);
				dev_err(tqspi->dev, "failed TX DMA transfer\n");
				num_errors++;
			}
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		if (tqspi->rx_status) {
			if (tqspi->rx_dma_chan)
				dmaengine_terminate_all(tqspi->rx_dma_chan);
			num_errors++;
		} else if (tqspi->rx_dma_chan) {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->rx_dma_chan);
				dev_err(tqspi->dev, "failed RX DMA transfer\n");
				num_errors++;
			}
		}
	}

	spin_lock_irqsave(&tqspi->lock, flags);

	if (num_errors) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_dma_unmap_xfer(tqspi, t);

	/* continue transfer in current message */
	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	if (total_fifo_words > QSPI_FIFO_DEPTH)
		num_errors = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		num_errors = tegra_qspi_start_cpu_based_transfer(tqspi, t);

exit:
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}
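
/*
 * Threaded interrupt handler: snapshot the FIFO status, latch any
 * underflow/overflow errors for the active directions and hand off to
 * the PIO or DMA completion logic.
 */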
static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
{
	struct tegra_qspi *tqspi = context_data;

	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);

	if (tqspi->cur_direction & DATA_DIR_RX)
		tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);

	tegra_qspi_mask_clear_irq(tqspi);

	if (!tqspi->is_curr_dma_xfer)
		return handle_cpu_based_xfer(tqspi);

	return handle_dma_based_xfer(tqspi);
}
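
/*
 * Per-SoC capabilities: Tegra210 only does plain transfers through the
 * external DMA engine; Tegra186/194 add combined sequence mode;
 * Tegra234 uses the controller's internal DMA and supports TPM wait
 * polling; Tegra241 additionally exposes four chip selects.
 */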
static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
	.has_ext_dma = true,
	.cmb_xfer_capable = false,
	.supports_tpm = false,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
	.has_ext_dma = true,
	.cmb_xfer_capable = true,
	.supports_tpm = false,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
	.has_ext_dma = false,
	.cmb_xfer_capable = true,
	.supports_tpm = true,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
	.has_ext_dma = true,
	.cmb_xfer_capable = true,
	.supports_tpm = true,
	.cs_count = 4,
};

static const struct of_device_id tegra_qspi_of_match[] = {
	{
		.compatible = "nvidia,tegra210-qspi",
		.data = &tegra210_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra186-qspi",
		.data = &tegra186_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra194-qspi",
		.data = &tegra186_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra234-qspi",
		.data = &tegra234_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra241-qspi",
		.data = &tegra241_qspi_soc_data,
	},
	{}
};

MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id tegra_qspi_acpi_match[] = {
	{
		.id = "NVDA1213",
		.driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
	}, {
		.id = "NVDA1313",
		.driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
	}, {
		.id = "NVDA1413",
		.driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
	}, {
		.id = "NVDA1513",
		.driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
	},
	{}
};

MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
#endif

static int tegra_qspi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct tegra_qspi *tqspi;
	struct resource *r;
	int ret, qspi_irq;
	int bus_num;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*tqspi));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);
	tqspi = spi_controller_get_devdata(host);

	host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
			  SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
	host->flags = SPI_CONTROLLER_HALF_DUPLEX;
	host->setup = tegra_qspi_setup;
	host->transfer_one_message = tegra_qspi_transfer_one_message;
	host->num_chipselect = 1;
	host->auto_runtime_pm = true;

	bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
	if (bus_num >= 0)
		host->bus_num = bus_num;

	tqspi->host = host;
	tqspi->dev = &pdev->dev;
	spin_lock_init(&tqspi->lock);

	tqspi->soc_data = device_get_match_data(&pdev->dev);
	host->num_chipselect = tqspi->soc_data->cs_count;
	tqspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
	if (IS_ERR(tqspi->base))
		return PTR_ERR(tqspi->base);

	tqspi->phys = r->start;
	qspi_irq = platform_get_irq(pdev, 0);
	if (qspi_irq < 0)
		return qspi_irq;
	tqspi->irq = qspi_irq;

	if (!has_acpi_companion(tqspi->dev)) {
		tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
		if (IS_ERR(tqspi->clk)) {
			ret = PTR_ERR(tqspi->clk);
			dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
			return ret;
		}
	}

	tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
	tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;

	ret = tegra_qspi_init_dma(tqspi);
	if (ret < 0)
		return ret;

	if (tqspi->use_dma)
		tqspi->max_buf_size = tqspi->dma_buf_size;

	init_completion(&tqspi->tx_dma_complete);
	init_completion(&tqspi->rx_dma_complete);
	init_completion(&tqspi->xfer_completion);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
		goto exit_pm_disable;
	}

	if (device_reset(tqspi->dev) < 0)
		dev_warn_once(tqspi->dev, "device reset failed\n");

	tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
	tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
	tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
	tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);

	pm_runtime_put(&pdev->dev);

	ret = request_threaded_irq(tqspi->irq, NULL,
				   tegra_qspi_isr_thread, IRQF_ONESHOT,
				   dev_name(&pdev->dev), tqspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
		goto exit_pm_disable;
	}

	host->dev.of_node = pdev->dev.of_node;
	ret = spi_register_controller(host);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register host: %d\n", ret);
		goto exit_free_irq;
	}

	return 0;

exit_free_irq:
	free_irq(qspi_irq, tqspi);
exit_pm_disable:
	pm_runtime_force_suspend(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);
	return ret;
}

static void tegra_qspi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);

	spi_unregister_controller(host);
	free_irq(tqspi->irq, tqspi);
	pm_runtime_force_suspend(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);
}

static int __maybe_unused tegra_qspi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);

	return spi_controller_suspend(host);
}

static int __maybe_unused tegra_qspi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
	pm_runtime_put(dev);

	return spi_controller_resume(host);
}
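
/*
 * Runtime PM only gates the controller clock. With an ACPI companion
 * the clock is managed by firmware, so these callbacks are no-ops.
 */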
static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);

	/* Runtime pm disabled with ACPI */
	if (has_acpi_companion(tqspi->dev))
		return 0;
	/* flush all writes which are in PPSB queue by reading back */
	tegra_qspi_readl(tqspi, QSPI_COMMAND1);

	clk_disable_unprepare(tqspi->clk);

	return 0;
}

static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
	int ret;

	/* Runtime pm disabled with ACPI */
	if (has_acpi_companion(tqspi->dev))
		return 0;
	ret = clk_prepare_enable(tqspi->clk);
	if (ret < 0)
		dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);

	return ret;
}

static const struct dev_pm_ops tegra_qspi_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
};

static struct platform_driver tegra_qspi_driver = {
	.driver = {
		.name = "tegra-qspi",
		.pm = &tegra_qspi_pm_ops,
		.of_match_table = tegra_qspi_of_match,
		.acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
	},
	.probe = tegra_qspi_probe,
	.remove = tegra_qspi_remove,
};
module_platform_driver(tegra_qspi_driver);

MODULE_ALIAS("platform:qspi-tegra");
MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
MODULE_LICENSE("GPL v2");