1 // SPDX-License-Identifier: GPL-2.0-only 2 // 3 // Copyright (C) 2020 NVIDIA CORPORATION. 4 5 #include <linux/clk.h> 6 #include <linux/completion.h> 7 #include <linux/delay.h> 8 #include <linux/dmaengine.h> 9 #include <linux/dma-mapping.h> 10 #include <linux/dmapool.h> 11 #include <linux/err.h> 12 #include <linux/interrupt.h> 13 #include <linux/io.h> 14 #include <linux/iopoll.h> 15 #include <linux/kernel.h> 16 #include <linux/kthread.h> 17 #include <linux/module.h> 18 #include <linux/platform_device.h> 19 #include <linux/pm_runtime.h> 20 #include <linux/of.h> 21 #include <linux/of_device.h> 22 #include <linux/reset.h> 23 #include <linux/spi/spi.h> 24 #include <linux/acpi.h> 25 #include <linux/property.h> 26 27 #define QSPI_COMMAND1 0x000 28 #define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0) 29 #define QSPI_PACKED BIT(5) 30 #define QSPI_INTERFACE_WIDTH_MASK (0x03 << 7) 31 #define QSPI_INTERFACE_WIDTH(x) (((x) & 0x03) << 7) 32 #define QSPI_INTERFACE_WIDTH_SINGLE QSPI_INTERFACE_WIDTH(0) 33 #define QSPI_INTERFACE_WIDTH_DUAL QSPI_INTERFACE_WIDTH(1) 34 #define QSPI_INTERFACE_WIDTH_QUAD QSPI_INTERFACE_WIDTH(2) 35 #define QSPI_SDR_DDR_SEL BIT(9) 36 #define QSPI_TX_EN BIT(11) 37 #define QSPI_RX_EN BIT(12) 38 #define QSPI_CS_SW_VAL BIT(20) 39 #define QSPI_CS_SW_HW BIT(21) 40 41 #define QSPI_CS_POL_INACTIVE(n) (1 << (22 + (n))) 42 #define QSPI_CS_POL_INACTIVE_MASK (0xF << 22) 43 #define QSPI_CS_SEL_0 (0 << 26) 44 #define QSPI_CS_SEL_1 (1 << 26) 45 #define QSPI_CS_SEL_2 (2 << 26) 46 #define QSPI_CS_SEL_3 (3 << 26) 47 #define QSPI_CS_SEL_MASK (3 << 26) 48 #define QSPI_CS_SEL(x) (((x) & 0x3) << 26) 49 50 #define QSPI_CONTROL_MODE_0 (0 << 28) 51 #define QSPI_CONTROL_MODE_3 (3 << 28) 52 #define QSPI_CONTROL_MODE_MASK (3 << 28) 53 #define QSPI_M_S BIT(30) 54 #define QSPI_PIO BIT(31) 55 56 #define QSPI_COMMAND2 0x004 57 #define QSPI_TX_TAP_DELAY(x) (((x) & 0x3f) << 10) 58 #define QSPI_RX_TAP_DELAY(x) (((x) & 0xff) << 0) 59 60 #define QSPI_CS_TIMING1 0x008 61 #define QSPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold)) 62 63 #define QSPI_CS_TIMING2 0x00c 64 #define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1f) << 0) 65 #define CS_ACTIVE_BETWEEN_PACKETS_0 BIT(5) 66 67 #define QSPI_TRANS_STATUS 0x010 68 #define QSPI_BLK_CNT(val) (((val) >> 0) & 0xffff) 69 #define QSPI_RDY BIT(30) 70 71 #define QSPI_FIFO_STATUS 0x014 72 #define QSPI_RX_FIFO_EMPTY BIT(0) 73 #define QSPI_RX_FIFO_FULL BIT(1) 74 #define QSPI_TX_FIFO_EMPTY BIT(2) 75 #define QSPI_TX_FIFO_FULL BIT(3) 76 #define QSPI_RX_FIFO_UNF BIT(4) 77 #define QSPI_RX_FIFO_OVF BIT(5) 78 #define QSPI_TX_FIFO_UNF BIT(6) 79 #define QSPI_TX_FIFO_OVF BIT(7) 80 #define QSPI_ERR BIT(8) 81 #define QSPI_TX_FIFO_FLUSH BIT(14) 82 #define QSPI_RX_FIFO_FLUSH BIT(15) 83 #define QSPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7f) 84 #define QSPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7f) 85 86 #define QSPI_FIFO_ERROR (QSPI_RX_FIFO_UNF | \ 87 QSPI_RX_FIFO_OVF | \ 88 QSPI_TX_FIFO_UNF | \ 89 QSPI_TX_FIFO_OVF) 90 #define QSPI_FIFO_EMPTY (QSPI_RX_FIFO_EMPTY | \ 91 QSPI_TX_FIFO_EMPTY) 92 93 #define QSPI_TX_DATA 0x018 94 #define QSPI_RX_DATA 0x01c 95 96 #define QSPI_DMA_CTL 0x020 97 #define QSPI_TX_TRIG(n) (((n) & 0x3) << 15) 98 #define QSPI_TX_TRIG_1 QSPI_TX_TRIG(0) 99 #define QSPI_TX_TRIG_4 QSPI_TX_TRIG(1) 100 #define QSPI_TX_TRIG_8 QSPI_TX_TRIG(2) 101 #define QSPI_TX_TRIG_16 QSPI_TX_TRIG(3) 102 103 #define QSPI_RX_TRIG(n) (((n) & 0x3) << 19) 104 #define QSPI_RX_TRIG_1 QSPI_RX_TRIG(0) 105 #define QSPI_RX_TRIG_4 QSPI_RX_TRIG(1) 106 #define QSPI_RX_TRIG_8 QSPI_RX_TRIG(2) 107 #define 
QSPI_RX_TRIG_16 QSPI_RX_TRIG(3) 108 109 #define QSPI_DMA_EN BIT(31) 110 111 #define QSPI_DMA_BLK 0x024 112 #define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0) 113 114 #define QSPI_TX_FIFO 0x108 115 #define QSPI_RX_FIFO 0x188 116 117 #define QSPI_FIFO_DEPTH 64 118 119 #define QSPI_INTR_MASK 0x18c 120 #define QSPI_INTR_RX_FIFO_UNF_MASK BIT(25) 121 #define QSPI_INTR_RX_FIFO_OVF_MASK BIT(26) 122 #define QSPI_INTR_TX_FIFO_UNF_MASK BIT(27) 123 #define QSPI_INTR_TX_FIFO_OVF_MASK BIT(28) 124 #define QSPI_INTR_RDY_MASK BIT(29) 125 #define QSPI_INTR_RX_TX_FIFO_ERR (QSPI_INTR_RX_FIFO_UNF_MASK | \ 126 QSPI_INTR_RX_FIFO_OVF_MASK | \ 127 QSPI_INTR_TX_FIFO_UNF_MASK | \ 128 QSPI_INTR_TX_FIFO_OVF_MASK) 129 130 #define QSPI_MISC_REG 0x194 131 #define QSPI_NUM_DUMMY_CYCLE(x) (((x) & 0xff) << 0) 132 #define QSPI_DUMMY_CYCLES_MAX 0xff 133 134 #define QSPI_CMB_SEQ_CMD 0x19c 135 #define QSPI_COMMAND_VALUE_SET(x) (((x) & 0xFF) << 0) 136 137 #define QSPI_CMB_SEQ_CMD_CFG 0x1a0 138 #define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13) 139 #define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13) 140 #define QSPI_COMMAND_SDR_DDR BIT(12) 141 #define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0) 142 143 #define QSPI_GLOBAL_CONFIG 0x1a4 144 #define QSPI_CMB_SEQ_EN BIT(0) 145 146 #define QSPI_CMB_SEQ_ADDR 0x1a8 147 #define QSPI_ADDRESS_VALUE_SET(x) (((x) & 0xFFFF) << 0) 148 149 #define QSPI_CMB_SEQ_ADDR_CFG 0x1ac 150 #define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13) 151 #define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13) 152 #define QSPI_ADDRESS_SDR_DDR BIT(12) 153 #define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0) 154 155 #define DATA_DIR_TX BIT(0) 156 #define DATA_DIR_RX BIT(1) 157 158 #define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000)) 159 #define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024) 160 #define CMD_TRANSFER 0 161 #define ADDR_TRANSFER 1 162 #define DATA_TRANSFER 2 163 164 struct tegra_qspi_soc_data { 165 bool has_dma; 166 bool cmb_xfer_capable; 167 unsigned int cs_count; 168 }; 169 170 struct tegra_qspi_client_data { 171 int tx_clk_tap_delay; 172 int rx_clk_tap_delay; 173 }; 174 175 struct tegra_qspi { 176 struct device *dev; 177 struct spi_master *master; 178 /* lock to protect data accessed by irq */ 179 spinlock_t lock; 180 181 struct clk *clk; 182 void __iomem *base; 183 phys_addr_t phys; 184 unsigned int irq; 185 186 u32 cur_speed; 187 unsigned int cur_pos; 188 unsigned int words_per_32bit; 189 unsigned int bytes_per_word; 190 unsigned int curr_dma_words; 191 unsigned int cur_direction; 192 193 unsigned int cur_rx_pos; 194 unsigned int cur_tx_pos; 195 196 unsigned int dma_buf_size; 197 unsigned int max_buf_size; 198 bool is_curr_dma_xfer; 199 200 struct completion rx_dma_complete; 201 struct completion tx_dma_complete; 202 203 u32 tx_status; 204 u32 rx_status; 205 u32 status_reg; 206 bool is_packed; 207 bool use_dma; 208 209 u32 command1_reg; 210 u32 dma_control_reg; 211 u32 def_command1_reg; 212 u32 def_command2_reg; 213 u32 spi_cs_timing1; 214 u32 spi_cs_timing2; 215 u8 dummy_cycles; 216 217 struct completion xfer_completion; 218 struct spi_transfer *curr_xfer; 219 220 struct dma_chan *rx_dma_chan; 221 u32 *rx_dma_buf; 222 dma_addr_t rx_dma_phys; 223 struct dma_async_tx_descriptor *rx_dma_desc; 224 225 struct dma_chan *tx_dma_chan; 226 u32 *tx_dma_buf; 227 dma_addr_t tx_dma_phys; 228 struct dma_async_tx_descriptor *tx_dma_desc; 229 const struct tegra_qspi_soc_data *soc_data; 230 }; 231 232 static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset) { 234 return readl(tqspi->base + offset); 235 }
236 237 static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset) 238 { 239 writel(value, tqspi->base + offset); 240 241 /* read back register to make sure that register writes completed */ 242 if (offset != QSPI_TX_FIFO) 243 readl(tqspi->base + QSPI_COMMAND1); 244 } 245 246 static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi) 247 { 248 u32 value; 249 250 /* write 1 to clear status register */ 251 value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS); 252 tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS); 253 254 value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK); 255 if (!(value & QSPI_INTR_RDY_MASK)) { 256 value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR); 257 tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK); 258 } 259 260 /* clear fifo status error if any */ 261 value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS); 262 if (value & QSPI_ERR) 263 tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS); 264 } 265 266 static unsigned int 267 tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t) 268 { 269 unsigned int max_word, max_len, total_fifo_words; 270 unsigned int remain_len = t->len - tqspi->cur_pos; 271 unsigned int bits_per_word = t->bits_per_word; 272 273 tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8); 274 275 /* 276 * Tegra QSPI controller supports packed or unpacked mode transfers. 277 * Packed mode is used for data transfers using 8, 16, or 32 bits per 278 * word with a minimum transfer of 1 word and for all other transfers 279 * unpacked mode will be used. 280 */ 281 282 if ((bits_per_word == 8 || bits_per_word == 16 || 283 bits_per_word == 32) && t->len > 3) { 284 tqspi->is_packed = true; 285 tqspi->words_per_32bit = 32 / bits_per_word; 286 } else { 287 tqspi->is_packed = false; 288 tqspi->words_per_32bit = 1; 289 } 290 291 if (tqspi->is_packed) { 292 max_len = min(remain_len, tqspi->max_buf_size); 293 tqspi->curr_dma_words = max_len / tqspi->bytes_per_word; 294 total_fifo_words = (max_len + 3) / 4; 295 } else { 296 max_word = (remain_len - 1) / tqspi->bytes_per_word + 1; 297 max_word = min(max_word, tqspi->max_buf_size / 4); 298 tqspi->curr_dma_words = max_word; 299 total_fifo_words = max_word; 300 } 301 302 return total_fifo_words; 303 } 304 305 static unsigned int 306 tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t) 307 { 308 unsigned int written_words, fifo_words_left, count; 309 unsigned int len, tx_empty_count, max_n_32bit, i; 310 u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos; 311 u32 fifo_status; 312 313 fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS); 314 tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status); 315 316 if (tqspi->is_packed) { 317 fifo_words_left = tx_empty_count * tqspi->words_per_32bit; 318 written_words = min(fifo_words_left, tqspi->curr_dma_words); 319 len = written_words * tqspi->bytes_per_word; 320 max_n_32bit = DIV_ROUND_UP(len, 4); 321 for (count = 0; count < max_n_32bit; count++) { 322 u32 x = 0; 323 324 for (i = 0; (i < 4) && len; i++, len--) 325 x |= (u32)(*tx_buf++) << (i * 8); 326 tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO); 327 } 328 329 tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word; 330 } else { 331 unsigned int write_bytes; 332 u8 bytes_per_word = tqspi->bytes_per_word; 333 334 max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count); 335 written_words = max_n_32bit; 336 len = written_words * tqspi->bytes_per_word; 337 if (len > t->len - tqspi->cur_pos) 338 len = t->len - 
tqspi->cur_pos; 339 write_bytes = len; 340 for (count = 0; count < max_n_32bit; count++) { 341 u32 x = 0; 342 343 for (i = 0; len && (i < bytes_per_word); i++, len--) 344 x |= (u32)(*tx_buf++) << (i * 8); 345 tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO); 346 } 347 348 tqspi->cur_tx_pos += write_bytes; 349 } 350 351 return written_words; 352 } 353 354 static unsigned int 355 tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t) 356 { 357 u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos; 358 unsigned int len, rx_full_count, count, i; 359 unsigned int read_words = 0; 360 u32 fifo_status, x; 361 362 fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS); 363 rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status); 364 if (tqspi->is_packed) { 365 len = tqspi->curr_dma_words * tqspi->bytes_per_word; 366 for (count = 0; count < rx_full_count; count++) { 367 x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO); 368 369 for (i = 0; len && (i < 4); i++, len--) 370 *rx_buf++ = (x >> i * 8) & 0xff; 371 } 372 373 read_words += tqspi->curr_dma_words; 374 tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word; 375 } else { 376 u32 rx_mask = ((u32)1 << t->bits_per_word) - 1; 377 u8 bytes_per_word = tqspi->bytes_per_word; 378 unsigned int read_bytes; 379 380 len = rx_full_count * bytes_per_word; 381 if (len > t->len - tqspi->cur_pos) 382 len = t->len - tqspi->cur_pos; 383 read_bytes = len; 384 for (count = 0; count < rx_full_count; count++) { 385 x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask; 386 387 for (i = 0; len && (i < bytes_per_word); i++, len--) 388 *rx_buf++ = (x >> (i * 8)) & 0xff; 389 } 390 391 read_words += rx_full_count; 392 tqspi->cur_rx_pos += read_bytes; 393 } 394 395 return read_words; 396 } 397 398 static void 399 tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t) 400 { 401 dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys, 402 tqspi->dma_buf_size, DMA_TO_DEVICE); 403 404 /* 405 * In packed mode, each word in FIFO may contain multiple packets 406 * based on bits per word. So all bytes in each FIFO word are valid. 407 * 408 * In unpacked mode, each word in FIFO contains single packet and 409 * based on bits per word any remaining bits in FIFO word will be 410 * ignored by the hardware and are invalid bits. 411 */ 412 if (tqspi->is_packed) { 413 tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word; 414 } else { 415 u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos; 416 unsigned int i, count, consume, write_bytes; 417 418 /* 419 * Fill tx_dma_buf to contain single packet in each word based 420 * on bits per word from SPI core tx_buf. 
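* For example, with 24 bits per word each 32-bit word of tx_dma_buf holds one 3-byte packet and the unused upper byte is ignored by the hardware.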
421 */ 422 consume = tqspi->curr_dma_words * tqspi->bytes_per_word; 423 if (consume > t->len - tqspi->cur_pos) 424 consume = t->len - tqspi->cur_pos; 425 write_bytes = consume; 426 for (count = 0; count < tqspi->curr_dma_words; count++) { 427 u32 x = 0; 428 429 for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--) 430 x |= (u32)(*tx_buf++) << (i * 8); 431 tqspi->tx_dma_buf[count] = x; 432 } 433 434 tqspi->cur_tx_pos += write_bytes; 435 } 436 437 dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys, 438 tqspi->dma_buf_size, DMA_TO_DEVICE); 439 } 440 441 static void 442 tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t) 443 { 444 dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys, 445 tqspi->dma_buf_size, DMA_FROM_DEVICE); 446 447 if (tqspi->is_packed) { 448 tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word; 449 } else { 450 unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos; 451 u32 rx_mask = ((u32)1 << t->bits_per_word) - 1; 452 unsigned int i, count, consume, read_bytes; 453 454 /* 455 * Each FIFO word contains single data packet. 456 * Skip invalid bits in each FIFO word based on bits per word 457 * and align bytes while filling in SPI core rx_buf. 458 */ 459 consume = tqspi->curr_dma_words * tqspi->bytes_per_word; 460 if (consume > t->len - tqspi->cur_pos) 461 consume = t->len - tqspi->cur_pos; 462 read_bytes = consume; 463 for (count = 0; count < tqspi->curr_dma_words; count++) { 464 u32 x = tqspi->rx_dma_buf[count] & rx_mask; 465 466 for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--) 467 *rx_buf++ = (x >> (i * 8)) & 0xff; 468 } 469 470 tqspi->cur_rx_pos += read_bytes; 471 } 472 473 dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys, 474 tqspi->dma_buf_size, DMA_FROM_DEVICE); 475 } 476 477 static void tegra_qspi_dma_complete(void *args) 478 { 479 struct completion *dma_complete = args; 480 481 complete(dma_complete); 482 } 483 484 static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len) 485 { 486 dma_addr_t tx_dma_phys; 487 488 reinit_completion(&tqspi->tx_dma_complete); 489 490 if (tqspi->is_packed) 491 tx_dma_phys = t->tx_dma; 492 else 493 tx_dma_phys = tqspi->tx_dma_phys; 494 495 tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys, 496 len, DMA_MEM_TO_DEV, 497 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 498 499 if (!tqspi->tx_dma_desc) { 500 dev_err(tqspi->dev, "Unable to get TX descriptor\n"); 501 return -EIO; 502 } 503 504 tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete; 505 tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete; 506 dmaengine_submit(tqspi->tx_dma_desc); 507 dma_async_issue_pending(tqspi->tx_dma_chan); 508 509 return 0; 510 } 511 512 static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len) 513 { 514 dma_addr_t rx_dma_phys; 515 516 reinit_completion(&tqspi->rx_dma_complete); 517 518 if (tqspi->is_packed) 519 rx_dma_phys = t->rx_dma; 520 else 521 rx_dma_phys = tqspi->rx_dma_phys; 522 523 tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys, 524 len, DMA_DEV_TO_MEM, 525 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 526 527 if (!tqspi->rx_dma_desc) { 528 dev_err(tqspi->dev, "Unable to get RX descriptor\n"); 529 return -EIO; 530 } 531 532 tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete; 533 tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete; 534 dmaengine_submit(tqspi->rx_dma_desc); 535 
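/* The descriptor is only queued here; actual data movement is paced by the controller's DMA requests once the caller sets QSPI_DMA_EN. */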
dma_async_issue_pending(tqspi->rx_dma_chan); 536 537 return 0; 538 } 539 540 static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic) 541 { 542 void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS; 543 u32 val; 544 545 val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS); 546 if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY) 547 return 0; 548 549 val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH; 550 tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS); 551 552 if (!atomic) 553 return readl_relaxed_poll_timeout(addr, val, 554 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY, 555 1000, 1000000); 556 557 return readl_relaxed_poll_timeout_atomic(addr, val, 558 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY, 559 1000, 1000000); 560 } 561 562 static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi) 563 { 564 u32 intr_mask; 565 566 intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK); 567 intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR); 568 tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK); 569 } 570 571 static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t) 572 { 573 u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos; 574 u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos; 575 unsigned int len; 576 577 len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4; 578 579 if (t->tx_buf) { 580 t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE); 581 if (dma_mapping_error(tqspi->dev, t->tx_dma)) 582 return -ENOMEM; 583 } 584 585 if (t->rx_buf) { 586 t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE); 587 if (dma_mapping_error(tqspi->dev, t->rx_dma)) { 588 dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE); 589 return -ENOMEM; 590 } 591 } 592 593 return 0; 594 } 595 596 static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t) 597 { 598 unsigned int len; 599 600 len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4; 601 602 dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE); 603 dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE); 604 } 605 606 static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t) 607 { 608 struct dma_slave_config dma_sconfig = { 0 }; 609 unsigned int len; 610 u8 dma_burst; 611 int ret = 0; 612 u32 val; 613 614 if (tqspi->is_packed) { 615 ret = tegra_qspi_dma_map_xfer(tqspi, t); 616 if (ret < 0) 617 return ret; 618 } 619 620 val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1); 621 tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK); 622 623 tegra_qspi_unmask_irq(tqspi); 624 625 if (tqspi->is_packed) 626 len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4; 627 else 628 len = tqspi->curr_dma_words * 4; 629 630 /* set attention level based on length of transfer */ 631 val = 0; 632 if (len & 0xf) { 633 val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1; 634 dma_burst = 1; 635 } else if (((len) >> 4) & 0x1) { 636 val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4; 637 dma_burst = 4; 638 } else { 639 val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8; 640 dma_burst = 8; 641 } 642 643 tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL); 644 tqspi->dma_control_reg = val; 645 646 dma_sconfig.device_fc = true; 647 if (tqspi->cur_direction & DATA_DIR_TX) { 648 dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO; 649 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 650 dma_sconfig.dst_maxburst = dma_burst; 651 ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig); 652 if (ret < 0) { 653 
dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); 654 return ret; 655 } 656 657 tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t); 658 ret = tegra_qspi_start_tx_dma(tqspi, t, len); 659 if (ret < 0) { 660 dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret); 661 return ret; 662 } 663 } 664 665 if (tqspi->cur_direction & DATA_DIR_RX) { 666 dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO; 667 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 668 dma_sconfig.src_maxburst = dma_burst; 669 ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig); 670 if (ret < 0) { 671 dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); 672 return ret; 673 } 674 675 dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys, 676 tqspi->dma_buf_size, 677 DMA_FROM_DEVICE); 678 679 ret = tegra_qspi_start_rx_dma(tqspi, t, len); 680 if (ret < 0) { 681 dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret); 682 if (tqspi->cur_direction & DATA_DIR_TX) 683 dmaengine_terminate_all(tqspi->tx_dma_chan); 684 return ret; 685 } 686 } 687 688 tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1); 689 690 tqspi->is_curr_dma_xfer = true; 691 tqspi->dma_control_reg = val; 692 val |= QSPI_DMA_EN; 693 tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL); 694 695 return ret; 696 } 697 698 static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t) 699 { 700 u32 val; 701 unsigned int cur_words; 702 703 if (qspi->cur_direction & DATA_DIR_TX) 704 cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t); 705 else 706 cur_words = qspi->curr_dma_words; 707 708 val = QSPI_DMA_BLK_SET(cur_words - 1); 709 tegra_qspi_writel(qspi, val, QSPI_DMA_BLK); 710 711 tegra_qspi_unmask_irq(qspi); 712 713 qspi->is_curr_dma_xfer = false; 714 val = qspi->command1_reg; 715 val |= QSPI_PIO; 716 tegra_qspi_writel(qspi, val, QSPI_COMMAND1); 717 718 return 0; 719 } 720 721 static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi) 722 { 723 if (!tqspi->soc_data->has_dma) 724 return; 725 726 if (tqspi->tx_dma_buf) { 727 dma_free_coherent(tqspi->dev, tqspi->dma_buf_size, 728 tqspi->tx_dma_buf, tqspi->tx_dma_phys); 729 tqspi->tx_dma_buf = NULL; 730 } 731 732 if (tqspi->tx_dma_chan) { 733 dma_release_channel(tqspi->tx_dma_chan); 734 tqspi->tx_dma_chan = NULL; 735 } 736 737 if (tqspi->rx_dma_buf) { 738 dma_free_coherent(tqspi->dev, tqspi->dma_buf_size, 739 tqspi->rx_dma_buf, tqspi->rx_dma_phys); 740 tqspi->rx_dma_buf = NULL; 741 } 742 743 if (tqspi->rx_dma_chan) { 744 dma_release_channel(tqspi->rx_dma_chan); 745 tqspi->rx_dma_chan = NULL; 746 } 747 } 748 749 static int tegra_qspi_init_dma(struct tegra_qspi *tqspi) 750 { 751 struct dma_chan *dma_chan; 752 dma_addr_t dma_phys; 753 u32 *dma_buf; 754 int err; 755 756 if (!tqspi->soc_data->has_dma) 757 return 0; 758 759 dma_chan = dma_request_chan(tqspi->dev, "rx"); 760 if (IS_ERR(dma_chan)) { 761 err = PTR_ERR(dma_chan); 762 goto err_out; 763 } 764 765 tqspi->rx_dma_chan = dma_chan; 766 767 dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL); 768 if (!dma_buf) { 769 err = -ENOMEM; 770 goto err_out; 771 } 772 773 tqspi->rx_dma_buf = dma_buf; 774 tqspi->rx_dma_phys = dma_phys; 775 776 dma_chan = dma_request_chan(tqspi->dev, "tx"); 777 if (IS_ERR(dma_chan)) { 778 err = PTR_ERR(dma_chan); 779 goto err_out; 780 } 781 782 tqspi->tx_dma_chan = dma_chan; 783 784 dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL); 785 if (!dma_buf) { 786 err = -ENOMEM; 787 goto err_out; 788 } 789
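/* Both channels and bounce buffers are ready; record the TX buffer and mark DMA usable for large transfers. */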
790 tqspi->tx_dma_buf = dma_buf; 791 tqspi->tx_dma_phys = dma_phys; 792 tqspi->use_dma = true; 793 794 return 0; 795 796 err_out: 797 tegra_qspi_deinit_dma(tqspi); 798 799 if (err != -EPROBE_DEFER) { 800 dev_err(tqspi->dev, "cannot use DMA: %d\n", err); 801 dev_err(tqspi->dev, "falling back to PIO\n"); 802 return 0; 803 } 804 805 return err; 806 } 807 808 static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t, 809 bool is_first_of_msg) 810 { 811 struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master); 812 struct tegra_qspi_client_data *cdata = spi->controller_data; 813 u32 command1, command2, speed = t->speed_hz; 814 u8 bits_per_word = t->bits_per_word; 815 u32 tx_tap = 0, rx_tap = 0; 816 int req_mode; 817 818 if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) { 819 clk_set_rate(tqspi->clk, speed); 820 tqspi->cur_speed = speed; 821 } 822 823 tqspi->cur_pos = 0; 824 tqspi->cur_rx_pos = 0; 825 tqspi->cur_tx_pos = 0; 826 tqspi->curr_xfer = t; 827 828 if (is_first_of_msg) { 829 tegra_qspi_mask_clear_irq(tqspi); 830 831 command1 = tqspi->def_command1_reg; 832 command1 |= QSPI_CS_SEL(spi->chip_select); 833 command1 |= QSPI_BIT_LENGTH(bits_per_word - 1); 834 835 command1 &= ~QSPI_CONTROL_MODE_MASK; 836 req_mode = spi->mode & 0x3; 837 if (req_mode == SPI_MODE_3) 838 command1 |= QSPI_CONTROL_MODE_3; 839 else 840 command1 |= QSPI_CONTROL_MODE_0; 841 842 if (spi->mode & SPI_CS_HIGH) 843 command1 |= QSPI_CS_SW_VAL; 844 else 845 command1 &= ~QSPI_CS_SW_VAL; 846 tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1); 847 848 if (cdata && cdata->tx_clk_tap_delay) 849 tx_tap = cdata->tx_clk_tap_delay; 850 851 if (cdata && cdata->rx_clk_tap_delay) 852 rx_tap = cdata->rx_clk_tap_delay; 853 854 command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap); 855 if (command2 != tqspi->def_command2_reg) 856 tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2); 857 858 } else { 859 command1 = tqspi->command1_reg; 860 command1 &= ~QSPI_BIT_LENGTH(~0); 861 command1 |= QSPI_BIT_LENGTH(bits_per_word - 1); 862 } 863 864 command1 &= ~QSPI_SDR_DDR_SEL; 865 866 return command1; 867 } 868 869 static int tegra_qspi_start_transfer_one(struct spi_device *spi, 870 struct spi_transfer *t, u32 command1) 871 { 872 struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master); 873 unsigned int total_fifo_words; 874 u8 bus_width = 0; 875 int ret; 876 877 total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t); 878 879 command1 &= ~QSPI_PACKED; 880 if (tqspi->is_packed) 881 command1 |= QSPI_PACKED; 882 tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1); 883 884 tqspi->cur_direction = 0; 885 886 command1 &= ~(QSPI_TX_EN | QSPI_RX_EN); 887 if (t->rx_buf) { 888 command1 |= QSPI_RX_EN; 889 tqspi->cur_direction |= DATA_DIR_RX; 890 bus_width = t->rx_nbits; 891 } 892 893 if (t->tx_buf) { 894 command1 |= QSPI_TX_EN; 895 tqspi->cur_direction |= DATA_DIR_TX; 896 bus_width = t->tx_nbits; 897 } 898 899 command1 &= ~QSPI_INTERFACE_WIDTH_MASK; 900 901 if (bus_width == SPI_NBITS_QUAD) 902 command1 |= QSPI_INTERFACE_WIDTH_QUAD; 903 else if (bus_width == SPI_NBITS_DUAL) 904 command1 |= QSPI_INTERFACE_WIDTH_DUAL; 905 else 906 command1 |= QSPI_INTERFACE_WIDTH_SINGLE; 907 908 tqspi->command1_reg = command1; 909 910 tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG); 911 912 ret = tegra_qspi_flush_fifos(tqspi, false); 913 if (ret < 0) 914 return ret; 915 916 if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH) 917 ret = 
tegra_qspi_start_dma_based_transfer(tqspi, t); 918 else 919 ret = tegra_qspi_start_cpu_based_transfer(tqspi, t); 920 921 return ret; 922 } 923 924 static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi) 925 { 926 struct tegra_qspi_client_data *cdata; 927 struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master); 928 929 cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL); 930 if (!cdata) 931 return NULL; 932 933 device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay", 934 &cdata->tx_clk_tap_delay); 935 device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay", 936 &cdata->rx_clk_tap_delay); 937 938 return cdata; 939 } 940 941 static int tegra_qspi_setup(struct spi_device *spi) 942 { 943 struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master); 944 struct tegra_qspi_client_data *cdata = spi->controller_data; 945 unsigned long flags; 946 u32 val; 947 int ret; 948 949 ret = pm_runtime_resume_and_get(tqspi->dev); 950 if (ret < 0) { 951 dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret); 952 return ret; 953 } 954 955 if (!cdata) { 956 cdata = tegra_qspi_parse_cdata_dt(spi); 957 spi->controller_data = cdata; 958 } 959 spin_lock_irqsave(&tqspi->lock, flags); 960 961 /* keep default cs state to inactive */ 962 val = tqspi->def_command1_reg; 963 val |= QSPI_CS_SEL(spi->chip_select); 964 if (spi->mode & SPI_CS_HIGH) 965 val &= ~QSPI_CS_POL_INACTIVE(spi->chip_select); 966 else 967 val |= QSPI_CS_POL_INACTIVE(spi->chip_select); 968 969 tqspi->def_command1_reg = val; 970 tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1); 971 972 spin_unlock_irqrestore(&tqspi->lock, flags); 973 974 pm_runtime_put(tqspi->dev); 975 976 return 0; 977 } 978 979 static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi) 980 { 981 dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n"); 982 dev_dbg(tqspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n", 983 tegra_qspi_readl(tqspi, QSPI_COMMAND1), 984 tegra_qspi_readl(tqspi, QSPI_COMMAND2)); 985 dev_dbg(tqspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n", 986 tegra_qspi_readl(tqspi, QSPI_DMA_CTL), 987 tegra_qspi_readl(tqspi, QSPI_DMA_BLK)); 988 dev_dbg(tqspi->dev, "INTR_MASK: 0x%08x | MISC: 0x%08x\n", 989 tegra_qspi_readl(tqspi, QSPI_INTR_MASK), 990 tegra_qspi_readl(tqspi, QSPI_MISC_REG)); 991 dev_dbg(tqspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n", 992 tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS), 993 tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS)); 994 } 995 996 static void tegra_qspi_handle_error(struct tegra_qspi *tqspi) 997 { 998 dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg); 999 tegra_qspi_dump_regs(tqspi); 1000 tegra_qspi_flush_fifos(tqspi, true); 1001 if (device_reset(tqspi->dev) < 0) 1002 dev_warn_once(tqspi->dev, "device reset failed\n"); 1003 } 1004 1005 static void tegra_qspi_transfer_end(struct spi_device *spi) 1006 { 1007 struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master); 1008 int cs_val = (spi->mode & SPI_CS_HIGH) ? 
0 : 1; 1009 1010 if (cs_val) 1011 tqspi->command1_reg |= QSPI_CS_SW_VAL; 1012 else 1013 tqspi->command1_reg &= ~QSPI_CS_SW_VAL; 1014 tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1); 1015 tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1); 1016 } 1017 1018 static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len) 1019 { 1020 u32 cmd_config = 0; 1021 1022 /* Extract Command configuration and value */ 1023 if (is_ddr) 1024 cmd_config |= QSPI_COMMAND_SDR_DDR; 1025 else 1026 cmd_config &= ~QSPI_COMMAND_SDR_DDR; 1027 1028 cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width); 1029 cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1); 1030 1031 return cmd_config; 1032 } 1033 1034 static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len) 1035 { 1036 u32 addr_config = 0; 1037 1038 /* Extract Address configuration and value */ 1039 is_ddr = 0; //Only SDR mode supported 1040 bus_width = 0; //X1 mode 1041 1042 if (is_ddr) 1043 addr_config |= QSPI_ADDRESS_SDR_DDR; 1044 else 1045 addr_config &= ~QSPI_ADDRESS_SDR_DDR; 1046 1047 addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width); 1048 addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1); 1049 1050 return addr_config; 1051 } 1052 1053 static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, 1054 struct spi_message *msg) 1055 { 1056 bool is_first_msg = true; 1057 struct spi_transfer *xfer; 1058 struct spi_device *spi = msg->spi; 1059 u8 transfer_phase = 0; 1060 u32 cmd1 = 0, dma_ctl = 0; 1061 int ret = 0; 1062 u32 address_value = 0; 1063 u32 cmd_config = 0, addr_config = 0; 1064 u8 cmd_value = 0, val = 0; 1065 1066 /* Enable Combined sequence mode */ 1067 val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG); 1068 val |= QSPI_CMB_SEQ_EN; 1069 tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG); 1070 /* Process individual transfer list */ 1071 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1072 switch (transfer_phase) { 1073 case CMD_TRANSFER: 1074 /* X1 SDR mode */ 1075 cmd_config = tegra_qspi_cmd_config(false, 0, 1076 xfer->len); 1077 cmd_value = *((const u8 *)(xfer->tx_buf)); 1078 break; 1079 case ADDR_TRANSFER: 1080 /* X1 SDR mode */ 1081 addr_config = tegra_qspi_addr_config(false, 0, 1082 xfer->len); 1083 address_value = *((const u32 *)(xfer->tx_buf)); 1084 break; 1085 case DATA_TRANSFER: 1086 /* Program Command, Address value in register */ 1087 tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD); 1088 tegra_qspi_writel(tqspi, address_value, 1089 QSPI_CMB_SEQ_ADDR); 1090 /* Program Command and Address config in register */ 1091 tegra_qspi_writel(tqspi, cmd_config, 1092 QSPI_CMB_SEQ_CMD_CFG); 1093 tegra_qspi_writel(tqspi, addr_config, 1094 QSPI_CMB_SEQ_ADDR_CFG); 1095 1096 reinit_completion(&tqspi->xfer_completion); 1097 cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, 1098 is_first_msg); 1099 ret = tegra_qspi_start_transfer_one(spi, xfer, 1100 cmd1); 1101 1102 if (ret < 0) { 1103 dev_err(tqspi->dev, "Failed to start transfer-one: %d\n", 1104 ret); 1105 return ret; 1106 } 1107 1108 is_first_msg = false; 1109 ret = wait_for_completion_timeout 1110 (&tqspi->xfer_completion, 1111 QSPI_DMA_TIMEOUT); 1112 1113 if (WARN_ON(ret == 0)) { 1114 dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n", 1115 ret); 1116 if (tqspi->is_curr_dma_xfer && 1117 (tqspi->cur_direction & DATA_DIR_TX)) 1118 dmaengine_terminate_all 1119 (tqspi->tx_dma_chan); 1120 1121 if (tqspi->is_curr_dma_xfer && 1122 (tqspi->cur_direction & DATA_DIR_RX)) 1123 dmaengine_terminate_all 1124 (tqspi->rx_dma_chan); 1125 1126 /* Abort 
transfer by resetting pio/dma bit */ 1127 if (!tqspi->is_curr_dma_xfer) { 1128 cmd1 = tegra_qspi_readl 1129 (tqspi, 1130 QSPI_COMMAND1); 1131 cmd1 &= ~QSPI_PIO; 1132 tegra_qspi_writel 1133 (tqspi, cmd1, 1134 QSPI_COMMAND1); 1135 } else { 1136 dma_ctl = tegra_qspi_readl 1137 (tqspi, 1138 QSPI_DMA_CTL); 1139 dma_ctl &= ~QSPI_DMA_EN; 1140 tegra_qspi_writel(tqspi, dma_ctl, 1141 QSPI_DMA_CTL); 1142 } 1143 1144 /* Reset controller if timeout happens */ 1145 if (device_reset(tqspi->dev) < 0) 1146 dev_warn_once(tqspi->dev, 1147 "device reset failed\n"); 1148 ret = -EIO; 1149 goto exit; 1150 } 1151 1152 if (tqspi->tx_status || tqspi->rx_status) { 1153 dev_err(tqspi->dev, "QSPI Transfer failed\n"); 1154 tqspi->tx_status = 0; 1155 tqspi->rx_status = 0; 1156 ret = -EIO; 1157 goto exit; 1158 } 1159 if (!xfer->cs_change) { 1160 tegra_qspi_transfer_end(spi); 1161 spi_transfer_delay_exec(xfer); 1162 } 1163 break; 1164 default: 1165 ret = -EINVAL; 1166 goto exit; 1167 } 1168 msg->actual_length += xfer->len; 1169 transfer_phase++; 1170 } 1171 ret = 0; 1172 1173 exit: 1174 msg->status = ret; 1175 if (ret < 0) { 1176 tegra_qspi_transfer_end(spi); 1177 spi_transfer_delay_exec(xfer); 1178 } 1179 1180 return ret; 1181 } 1182 1183 static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi, 1184 struct spi_message *msg) 1185 { 1186 struct spi_device *spi = msg->spi; 1187 struct spi_transfer *transfer; 1188 bool is_first_msg = true; 1189 int ret = 0, val = 0; 1190 1191 msg->status = 0; 1192 msg->actual_length = 0; 1193 tqspi->tx_status = 0; 1194 tqspi->rx_status = 0; 1195 1196 /* Disable Combined sequence mode */ 1197 val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG); 1198 val &= ~QSPI_CMB_SEQ_EN; 1199 tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG); 1200 list_for_each_entry(transfer, &msg->transfers, transfer_list) { 1201 struct spi_transfer *xfer = transfer; 1202 u8 dummy_bytes = 0; 1203 u32 cmd1; 1204 1205 tqspi->dummy_cycles = 0; 1206 /* 1207 * Tegra QSPI hardware supports dummy bytes transfer after actual transfer 1208 * bytes based on programmed dummy clock cycles in the QSPI_MISC register. 1209 * So, check if the next transfer is dummy data transfer and program dummy 1210 * clock cycles along with the current transfer and skip next transfer. 
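* The skipped dummy transfer's length is still counted in msg->actual_length (added as dummy_bytes).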
1211 */ 1212 if (!list_is_last(&xfer->transfer_list, &msg->transfers)) { 1213 struct spi_transfer *next_xfer; 1214 1215 next_xfer = list_next_entry(xfer, transfer_list); 1216 if (next_xfer->dummy_data) { 1217 u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits; 1218 1219 if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) { 1220 tqspi->dummy_cycles = dummy_cycles; 1221 dummy_bytes = next_xfer->len; 1222 transfer = next_xfer; 1223 } 1224 } 1225 } 1226 1227 reinit_completion(&tqspi->xfer_completion); 1228 1229 cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg); 1230 1231 ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1); 1232 if (ret < 0) { 1233 dev_err(tqspi->dev, "failed to start transfer: %d\n", ret); 1234 goto complete_xfer; 1235 } 1236 1237 ret = wait_for_completion_timeout(&tqspi->xfer_completion, 1238 QSPI_DMA_TIMEOUT); 1239 if (WARN_ON(ret == 0)) { 1240 dev_err(tqspi->dev, "transfer timeout\n"); 1241 if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX)) 1242 dmaengine_terminate_all(tqspi->tx_dma_chan); 1243 if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX)) 1244 dmaengine_terminate_all(tqspi->rx_dma_chan); 1245 tegra_qspi_handle_error(tqspi); 1246 ret = -EIO; 1247 goto complete_xfer; 1248 } 1249 1250 if (tqspi->tx_status || tqspi->rx_status) { 1251 tegra_qspi_handle_error(tqspi); 1252 ret = -EIO; 1253 goto complete_xfer; 1254 } 1255 1256 msg->actual_length += xfer->len + dummy_bytes; 1257 1258 complete_xfer: 1259 if (ret < 0) { 1260 tegra_qspi_transfer_end(spi); 1261 spi_transfer_delay_exec(xfer); 1262 goto exit; 1263 } 1264 1265 if (list_is_last(&xfer->transfer_list, &msg->transfers)) { 1266 /* de-activate CS after last transfer only when cs_change is not set */ 1267 if (!xfer->cs_change) { 1268 tegra_qspi_transfer_end(spi); 1269 spi_transfer_delay_exec(xfer); 1270 } 1271 } else if (xfer->cs_change) { 1272 /* de-activated CS between the transfers only when cs_change is set */ 1273 tegra_qspi_transfer_end(spi); 1274 spi_transfer_delay_exec(xfer); 1275 } 1276 } 1277 1278 ret = 0; 1279 exit: 1280 msg->status = ret; 1281 1282 return ret; 1283 } 1284 1285 static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi, 1286 struct spi_message *msg) 1287 { 1288 int transfer_count = 0; 1289 struct spi_transfer *xfer; 1290 1291 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1292 transfer_count++; 1293 } 1294 if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3) 1295 return false; 1296 xfer = list_first_entry(&msg->transfers, typeof(*xfer), 1297 transfer_list); 1298 if (xfer->len > 2) 1299 return false; 1300 xfer = list_next_entry(xfer, transfer_list); 1301 if (xfer->len > 4 || xfer->len < 3) 1302 return false; 1303 xfer = list_next_entry(xfer, transfer_list); 1304 if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2)) 1305 return false; 1306 1307 return true; 1308 } 1309 1310 static int tegra_qspi_transfer_one_message(struct spi_master *master, 1311 struct spi_message *msg) 1312 { 1313 struct tegra_qspi *tqspi = spi_master_get_devdata(master); 1314 int ret; 1315 1316 if (tegra_qspi_validate_cmb_seq(tqspi, msg)) 1317 ret = tegra_qspi_combined_seq_xfer(tqspi, msg); 1318 else 1319 ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg); 1320 1321 spi_finalize_current_message(master); 1322 1323 return ret; 1324 } 1325 1326 static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi) 1327 { 1328 struct spi_transfer *t = tqspi->curr_xfer; 1329 unsigned long flags; 1330 1331 spin_lock_irqsave(&tqspi->lock, flags); 
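/* With the lock held, first check for FIFO errors recorded by the ISR thread before draining the RX FIFO or queueing the next chunk of this transfer. */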
1332 1333 if (tqspi->tx_status || tqspi->rx_status) { 1334 tegra_qspi_handle_error(tqspi); 1335 complete(&tqspi->xfer_completion); 1336 goto exit; 1337 } 1338 1339 if (tqspi->cur_direction & DATA_DIR_RX) 1340 tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t); 1341 1342 if (tqspi->cur_direction & DATA_DIR_TX) 1343 tqspi->cur_pos = tqspi->cur_tx_pos; 1344 else 1345 tqspi->cur_pos = tqspi->cur_rx_pos; 1346 1347 if (tqspi->cur_pos == t->len) { 1348 complete(&tqspi->xfer_completion); 1349 goto exit; 1350 } 1351 1352 tegra_qspi_calculate_curr_xfer_param(tqspi, t); 1353 tegra_qspi_start_cpu_based_transfer(tqspi, t); 1354 exit: 1355 spin_unlock_irqrestore(&tqspi->lock, flags); 1356 return IRQ_HANDLED; 1357 } 1358 1359 static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi) 1360 { 1361 struct spi_transfer *t = tqspi->curr_xfer; 1362 unsigned int total_fifo_words; 1363 unsigned long flags; 1364 long wait_status; 1365 int err = 0; 1366 1367 if (tqspi->cur_direction & DATA_DIR_TX) { 1368 if (tqspi->tx_status) { 1369 dmaengine_terminate_all(tqspi->tx_dma_chan); 1370 err += 1; 1371 } else { 1372 wait_status = wait_for_completion_interruptible_timeout( 1373 &tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT); 1374 if (wait_status <= 0) { 1375 dmaengine_terminate_all(tqspi->tx_dma_chan); 1376 dev_err(tqspi->dev, "failed TX DMA transfer\n"); 1377 err += 1; 1378 } 1379 } 1380 } 1381 1382 if (tqspi->cur_direction & DATA_DIR_RX) { 1383 if (tqspi->rx_status) { 1384 dmaengine_terminate_all(tqspi->rx_dma_chan); 1385 err += 2; 1386 } else { 1387 wait_status = wait_for_completion_interruptible_timeout( 1388 &tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT); 1389 if (wait_status <= 0) { 1390 dmaengine_terminate_all(tqspi->rx_dma_chan); 1391 dev_err(tqspi->dev, "failed RX DMA transfer\n"); 1392 err += 2; 1393 } 1394 } 1395 } 1396 1397 spin_lock_irqsave(&tqspi->lock, flags); 1398 1399 if (err) { 1400 tegra_qspi_dma_unmap_xfer(tqspi, t); 1401 tegra_qspi_handle_error(tqspi); 1402 complete(&tqspi->xfer_completion); 1403 goto exit; 1404 } 1405 1406 if (tqspi->cur_direction & DATA_DIR_RX) 1407 tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t); 1408 1409 if (tqspi->cur_direction & DATA_DIR_TX) 1410 tqspi->cur_pos = tqspi->cur_tx_pos; 1411 else 1412 tqspi->cur_pos = tqspi->cur_rx_pos; 1413 1414 if (tqspi->cur_pos == t->len) { 1415 tegra_qspi_dma_unmap_xfer(tqspi, t); 1416 complete(&tqspi->xfer_completion); 1417 goto exit; 1418 } 1419 1420 tegra_qspi_dma_unmap_xfer(tqspi, t); 1421 1422 /* continue transfer in current message */ 1423 total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t); 1424 if (total_fifo_words > QSPI_FIFO_DEPTH) 1425 err = tegra_qspi_start_dma_based_transfer(tqspi, t); 1426 else 1427 err = tegra_qspi_start_cpu_based_transfer(tqspi, t); 1428 1429 exit: 1430 spin_unlock_irqrestore(&tqspi->lock, flags); 1431 return IRQ_HANDLED; 1432 } 1433 1434 static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data) 1435 { 1436 struct tegra_qspi *tqspi = context_data; 1437 1438 tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS); 1439 1440 if (tqspi->cur_direction & DATA_DIR_TX) 1441 tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF); 1442 1443 if (tqspi->cur_direction & DATA_DIR_RX) 1444 tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF); 1445 1446 tegra_qspi_mask_clear_irq(tqspi); 1447 1448 if (!tqspi->is_curr_dma_xfer) 1449 return handle_cpu_based_xfer(tqspi); 1450 1451 return handle_dma_based_xfer(tqspi); 1452 } 1453 1454 static struct 
tegra_qspi_soc_data tegra210_qspi_soc_data = { 1455 .has_dma = true, 1456 .cmb_xfer_capable = false, 1457 .cs_count = 1, 1458 }; 1459 1460 static struct tegra_qspi_soc_data tegra186_qspi_soc_data = { 1461 .has_dma = true, 1462 .cmb_xfer_capable = true, 1463 .cs_count = 1, 1464 }; 1465 1466 static struct tegra_qspi_soc_data tegra234_qspi_soc_data = { 1467 .has_dma = false, 1468 .cmb_xfer_capable = true, 1469 .cs_count = 1, 1470 }; 1471 1472 static struct tegra_qspi_soc_data tegra241_qspi_soc_data = { 1473 .has_dma = false, 1474 .cmb_xfer_capable = true, 1475 .cs_count = 4, 1476 }; 1477 1478 static const struct of_device_id tegra_qspi_of_match[] = { 1479 { 1480 .compatible = "nvidia,tegra210-qspi", 1481 .data = &tegra210_qspi_soc_data, 1482 }, { 1483 .compatible = "nvidia,tegra186-qspi", 1484 .data = &tegra186_qspi_soc_data, 1485 }, { 1486 .compatible = "nvidia,tegra194-qspi", 1487 .data = &tegra186_qspi_soc_data, 1488 }, { 1489 .compatible = "nvidia,tegra234-qspi", 1490 .data = &tegra234_qspi_soc_data, 1491 }, { 1492 .compatible = "nvidia,tegra241-qspi", 1493 .data = &tegra241_qspi_soc_data, 1494 }, 1495 {} 1496 }; 1497 1498 MODULE_DEVICE_TABLE(of, tegra_qspi_of_match); 1499 1500 #ifdef CONFIG_ACPI 1501 static const struct acpi_device_id tegra_qspi_acpi_match[] = { 1502 { 1503 .id = "NVDA1213", 1504 .driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data, 1505 }, { 1506 .id = "NVDA1313", 1507 .driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data, 1508 }, { 1509 .id = "NVDA1413", 1510 .driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data, 1511 }, { 1512 .id = "NVDA1513", 1513 .driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data, 1514 }, 1515 {} 1516 }; 1517 1518 MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match); 1519 #endif 1520 1521 static int tegra_qspi_probe(struct platform_device *pdev) 1522 { 1523 struct spi_master *master; 1524 struct tegra_qspi *tqspi; 1525 struct resource *r; 1526 int ret, qspi_irq; 1527 int bus_num; 1528 1529 master = devm_spi_alloc_master(&pdev->dev, sizeof(*tqspi)); 1530 if (!master) 1531 return -ENOMEM; 1532 1533 platform_set_drvdata(pdev, master); 1534 tqspi = spi_master_get_devdata(master); 1535 1536 master->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH | 1537 SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD; 1538 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8); 1539 master->flags = SPI_CONTROLLER_HALF_DUPLEX; 1540 master->setup = tegra_qspi_setup; 1541 master->transfer_one_message = tegra_qspi_transfer_one_message; 1542 master->num_chipselect = 1; 1543 master->auto_runtime_pm = true; 1544 1545 bus_num = of_alias_get_id(pdev->dev.of_node, "spi"); 1546 if (bus_num >= 0) 1547 master->bus_num = bus_num; 1548 1549 tqspi->master = master; 1550 tqspi->dev = &pdev->dev; 1551 spin_lock_init(&tqspi->lock); 1552 1553 tqspi->soc_data = device_get_match_data(&pdev->dev); 1554 master->num_chipselect = tqspi->soc_data->cs_count; 1555 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1556 tqspi->base = devm_ioremap_resource(&pdev->dev, r); 1557 if (IS_ERR(tqspi->base)) 1558 return PTR_ERR(tqspi->base); 1559 1560 tqspi->phys = r->start; 1561 qspi_irq = platform_get_irq(pdev, 0); 1562 if (qspi_irq < 0) 1563 return qspi_irq; 1564 tqspi->irq = qspi_irq; 1565 1566 if (!has_acpi_companion(tqspi->dev)) { 1567 tqspi->clk = devm_clk_get(&pdev->dev, "qspi"); 1568 if (IS_ERR(tqspi->clk)) { 1569 ret = PTR_ERR(tqspi->clk); 1570 dev_err(&pdev->dev, "failed to get clock: %d\n", ret); 1571 return ret; 1572 } 1573 1574 } 1575 1576 
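/* Default transfer chunk limit is the PIO FIFO size in bytes; it is raised to the DMA bounce-buffer size below when DMA initialization succeeds. */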
tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2; 1577 tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN; 1578 1579 ret = tegra_qspi_init_dma(tqspi); 1580 if (ret < 0) 1581 return ret; 1582 1583 if (tqspi->use_dma) 1584 tqspi->max_buf_size = tqspi->dma_buf_size; 1585 1586 init_completion(&tqspi->tx_dma_complete); 1587 init_completion(&tqspi->rx_dma_complete); 1588 init_completion(&tqspi->xfer_completion); 1589 1590 pm_runtime_enable(&pdev->dev); 1591 ret = pm_runtime_resume_and_get(&pdev->dev); 1592 if (ret < 0) { 1593 dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret); 1594 goto exit_pm_disable; 1595 } 1596 1597 if (device_reset(tqspi->dev) < 0) 1598 dev_warn_once(tqspi->dev, "device reset failed\n"); 1599 1600 tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL; 1601 tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1); 1602 tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1); 1603 tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2); 1604 tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2); 1605 1606 pm_runtime_put(&pdev->dev); 1607 1608 ret = request_threaded_irq(tqspi->irq, NULL, 1609 tegra_qspi_isr_thread, IRQF_ONESHOT, 1610 dev_name(&pdev->dev), tqspi); 1611 if (ret < 0) { 1612 dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret); 1613 goto exit_pm_disable; 1614 } 1615 1616 master->dev.of_node = pdev->dev.of_node; 1617 ret = spi_register_master(master); 1618 if (ret < 0) { 1619 dev_err(&pdev->dev, "failed to register master: %d\n", ret); 1620 goto exit_free_irq; 1621 } 1622 1623 return 0; 1624 1625 exit_free_irq: 1626 free_irq(qspi_irq, tqspi); 1627 exit_pm_disable: 1628 pm_runtime_force_suspend(&pdev->dev); 1629 tegra_qspi_deinit_dma(tqspi); 1630 return ret; 1631 } 1632 1633 static int tegra_qspi_remove(struct platform_device *pdev) 1634 { 1635 struct spi_master *master = platform_get_drvdata(pdev); 1636 struct tegra_qspi *tqspi = spi_master_get_devdata(master); 1637 1638 spi_unregister_master(master); 1639 free_irq(tqspi->irq, tqspi); 1640 pm_runtime_force_suspend(&pdev->dev); 1641 tegra_qspi_deinit_dma(tqspi); 1642 1643 return 0; 1644 } 1645 1646 static int __maybe_unused tegra_qspi_suspend(struct device *dev) 1647 { 1648 struct spi_master *master = dev_get_drvdata(dev); 1649 1650 return spi_master_suspend(master); 1651 } 1652 1653 static int __maybe_unused tegra_qspi_resume(struct device *dev) 1654 { 1655 struct spi_master *master = dev_get_drvdata(dev); 1656 struct tegra_qspi *tqspi = spi_master_get_devdata(master); 1657 int ret; 1658 1659 ret = pm_runtime_resume_and_get(dev); 1660 if (ret < 0) { 1661 dev_err(dev, "failed to get runtime PM: %d\n", ret); 1662 return ret; 1663 } 1664 1665 tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1); 1666 tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2); 1667 pm_runtime_put(dev); 1668 1669 return spi_master_resume(master); 1670 } 1671 1672 static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev) 1673 { 1674 struct spi_master *master = dev_get_drvdata(dev); 1675 struct tegra_qspi *tqspi = spi_master_get_devdata(master); 1676 1677 /* Runtime pm disabled with ACPI */ 1678 if (has_acpi_companion(tqspi->dev)) 1679 return 0; 1680 /* flush all write which are in PPSB queue by reading back */ 1681 tegra_qspi_readl(tqspi, QSPI_COMMAND1); 1682 1683 clk_disable_unprepare(tqspi->clk); 1684 1685 return 0; 1686 } 1687 1688 static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev) 1689 { 1690 struct spi_master *master 
= dev_get_drvdata(dev); 1691 struct tegra_qspi *tqspi = spi_master_get_devdata(master); 1692 int ret; 1693 1694 /* Runtime pm disabled with ACPI */ 1695 if (has_acpi_companion(tqspi->dev)) 1696 return 0; 1697 ret = clk_prepare_enable(tqspi->clk); 1698 if (ret < 0) 1699 dev_err(tqspi->dev, "failed to enable clock: %d\n", ret); 1700 1701 return ret; 1702 } 1703 1704 static const struct dev_pm_ops tegra_qspi_pm_ops = { 1705 SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL) 1706 SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume) 1707 }; 1708 1709 static struct platform_driver tegra_qspi_driver = { 1710 .driver = { 1711 .name = "tegra-qspi", 1712 .pm = &tegra_qspi_pm_ops, 1713 .of_match_table = tegra_qspi_of_match, 1714 .acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match), 1715 }, 1716 .probe = tegra_qspi_probe, 1717 .remove = tegra_qspi_remove, 1718 }; 1719 module_platform_driver(tegra_qspi_driver); 1720 1721 MODULE_ALIAS("platform:qspi-tegra"); 1722 MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver"); 1723 MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>"); 1724 MODULE_LICENSE("GPL v2"); 1725