// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 NVIDIA CORPORATION.

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
#include <linux/property.h>
#include <linux/sizes.h>

#define QSPI_COMMAND1 0x000
#define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
#define QSPI_PACKED BIT(5)
#define QSPI_INTERFACE_WIDTH_MASK (0x03 << 7)
#define QSPI_INTERFACE_WIDTH(x) (((x) & 0x03) << 7)
#define QSPI_INTERFACE_WIDTH_SINGLE QSPI_INTERFACE_WIDTH(0)
#define QSPI_INTERFACE_WIDTH_DUAL QSPI_INTERFACE_WIDTH(1)
#define QSPI_INTERFACE_WIDTH_QUAD QSPI_INTERFACE_WIDTH(2)
#define QSPI_SDR_DDR_SEL BIT(9)
#define QSPI_TX_EN BIT(11)
#define QSPI_RX_EN BIT(12)
#define QSPI_CS_SW_VAL BIT(20)
#define QSPI_CS_SW_HW BIT(21)

#define QSPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
#define QSPI_CS_POL_INACTIVE_MASK (0xF << 22)
#define QSPI_CS_SEL_0 (0 << 26)
#define QSPI_CS_SEL_1 (1 << 26)
#define QSPI_CS_SEL_2 (2 << 26)
#define QSPI_CS_SEL_3 (3 << 26)
#define QSPI_CS_SEL_MASK (3 << 26)
#define QSPI_CS_SEL(x) (((x) & 0x3) << 26)

#define QSPI_CONTROL_MODE_0 (0 << 28)
#define QSPI_CONTROL_MODE_3 (3 << 28)
#define QSPI_CONTROL_MODE_MASK (3 << 28)
#define QSPI_M_S BIT(30)
#define QSPI_PIO BIT(31)

#define QSPI_COMMAND2 0x004
#define QSPI_TX_TAP_DELAY(x) (((x) & 0x3f) << 10)
#define QSPI_RX_TAP_DELAY(x) (((x) & 0xff) << 0)

#define QSPI_CS_TIMING1 0x008
#define QSPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))

#define QSPI_CS_TIMING2 0x00c
#define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1f) << 0)
#define CS_ACTIVE_BETWEEN_PACKETS_0 BIT(5)

#define QSPI_TRANS_STATUS 0x010
#define QSPI_BLK_CNT(val) (((val) >> 0) & 0xffff)
#define QSPI_RDY BIT(30)

#define QSPI_FIFO_STATUS 0x014
#define QSPI_RX_FIFO_EMPTY BIT(0)
#define QSPI_RX_FIFO_FULL BIT(1)
#define QSPI_TX_FIFO_EMPTY BIT(2)
#define QSPI_TX_FIFO_FULL BIT(3)
#define QSPI_RX_FIFO_UNF BIT(4)
#define QSPI_RX_FIFO_OVF BIT(5)
#define QSPI_TX_FIFO_UNF BIT(6)
#define QSPI_TX_FIFO_OVF BIT(7)
#define QSPI_ERR BIT(8)
#define QSPI_TX_FIFO_FLUSH BIT(14)
#define QSPI_RX_FIFO_FLUSH BIT(15)
#define QSPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7f)
#define QSPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7f)

#define QSPI_FIFO_ERROR (QSPI_RX_FIFO_UNF | \
			 QSPI_RX_FIFO_OVF | \
			 QSPI_TX_FIFO_UNF | \
			 QSPI_TX_FIFO_OVF)
#define QSPI_FIFO_EMPTY (QSPI_RX_FIFO_EMPTY | \
			 QSPI_TX_FIFO_EMPTY)

#define QSPI_TX_DATA 0x018
#define QSPI_RX_DATA 0x01c

#define QSPI_DMA_CTL 0x020
#define QSPI_TX_TRIG(n) (((n) & 0x3) << 15)
#define QSPI_TX_TRIG_1 QSPI_TX_TRIG(0)
#define QSPI_TX_TRIG_4 QSPI_TX_TRIG(1)
#define QSPI_TX_TRIG_8 QSPI_TX_TRIG(2)
#define QSPI_TX_TRIG_16 QSPI_TX_TRIG(3)

#define QSPI_RX_TRIG(n) (((n) & 0x3) << 19)
#define QSPI_RX_TRIG_1 QSPI_RX_TRIG(0)
#define QSPI_RX_TRIG_4 QSPI_RX_TRIG(1)
#define QSPI_RX_TRIG_8 QSPI_RX_TRIG(2)
#define QSPI_RX_TRIG_16 QSPI_RX_TRIG(3)

#define QSPI_DMA_EN BIT(31)

#define QSPI_DMA_BLK 0x024
#define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0)

#define QSPI_DMA_MEM_ADDRESS 0x028
#define QSPI_DMA_HI_ADDRESS 0x02c

#define QSPI_TX_FIFO 0x108
#define QSPI_RX_FIFO 0x188

#define QSPI_FIFO_DEPTH 64

#define QSPI_INTR_MASK 0x18c
#define QSPI_INTR_RX_FIFO_UNF_MASK BIT(25)
#define QSPI_INTR_RX_FIFO_OVF_MASK BIT(26)
#define QSPI_INTR_TX_FIFO_UNF_MASK BIT(27)
#define QSPI_INTR_TX_FIFO_OVF_MASK BIT(28)
#define QSPI_INTR_RDY_MASK BIT(29)
#define QSPI_INTR_RX_TX_FIFO_ERR (QSPI_INTR_RX_FIFO_UNF_MASK | \
				  QSPI_INTR_RX_FIFO_OVF_MASK | \
				  QSPI_INTR_TX_FIFO_UNF_MASK | \
				  QSPI_INTR_TX_FIFO_OVF_MASK)

#define QSPI_MISC_REG 0x194
#define QSPI_NUM_DUMMY_CYCLE(x) (((x) & 0xff) << 0)
#define QSPI_DUMMY_CYCLES_MAX 0xff

#define QSPI_CMB_SEQ_CMD 0x19c
#define QSPI_COMMAND_VALUE_SET(x) (((x) & 0xFF) << 0)

#define QSPI_CMB_SEQ_CMD_CFG 0x1a0
#define QSPI_COMMAND_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13)
#define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13)
#define QSPI_COMMAND_SDR_DDR BIT(12)
#define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0)

#define QSPI_GLOBAL_CONFIG 0x1a4
#define QSPI_CMB_SEQ_EN BIT(0)
#define QSPI_TPM_WAIT_POLL_EN BIT(1)

#define QSPI_CMB_SEQ_ADDR 0x1a8
#define QSPI_ADDRESS_VALUE_SET(x) (((x) & 0xFFFF) << 0)

#define QSPI_CMB_SEQ_ADDR_CFG 0x1ac
#define QSPI_ADDRESS_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13)
#define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13)
#define QSPI_ADDRESS_SDR_DDR BIT(12)
#define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0)

#define DATA_DIR_TX BIT(0)
#define DATA_DIR_RX BIT(1)

#define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
#define DEFAULT_QSPI_DMA_BUF_LEN SZ_64K

enum tegra_qspi_transfer_type {
	CMD_TRANSFER = 0,
	ADDR_TRANSFER = 1,
	DUMMY_TRANSFER = 2,
	DATA_TRANSFER = 3
};
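
/*
 * Illustrative mapping of the phases above (an assumption based on the
 * combined-sequence validation in tegra_qspi_validate_cmb_seq(), not a
 * datasheet reference): a flash read is typically a 1- or 2-byte command
 * transfer, a 3- or 4-byte address transfer, an optional dummy transfer
 * and then the data transfer.
 */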

struct tegra_qspi_soc_data {
	bool cmb_xfer_capable;
	bool supports_tpm;
	bool has_ext_dma;
	unsigned int cs_count;
};

struct tegra_qspi_client_data {
	int tx_clk_tap_delay;
	int rx_clk_tap_delay;
};

struct tegra_qspi {
	struct device *dev;
	struct spi_controller *host;
	/* lock to protect data accessed by irq */
	spinlock_t lock;

	struct clk *clk;
	void __iomem *base;
	phys_addr_t phys;
	unsigned int irq;

	u32 cur_speed;
	unsigned int cur_pos;
	unsigned int words_per_32bit;
	unsigned int bytes_per_word;
	unsigned int curr_dma_words;
	unsigned int cur_direction;

	unsigned int cur_rx_pos;
	unsigned int cur_tx_pos;

	unsigned int dma_buf_size;
	unsigned int max_buf_size;
	bool is_curr_dma_xfer;

	struct completion rx_dma_complete;
	struct completion tx_dma_complete;

	u32 tx_status;
	u32 rx_status;
	u32 status_reg;
	bool is_packed;
	bool use_dma;

	u32 command1_reg;
	u32 dma_control_reg;
	u32 def_command1_reg;
	u32 def_command2_reg;
	u32 spi_cs_timing1;
	u32 spi_cs_timing2;
	u8 dummy_cycles;

	struct completion xfer_completion;
	struct spi_transfer *curr_xfer;

	struct dma_chan *rx_dma_chan;
	u32 *rx_dma_buf;
	dma_addr_t rx_dma_phys;
	struct dma_async_tx_descriptor *rx_dma_desc;

	struct dma_chan *tx_dma_chan;
	u32 *tx_dma_buf;
	dma_addr_t tx_dma_phys;
	struct dma_async_tx_descriptor *tx_dma_desc;
	const struct tegra_qspi_soc_data *soc_data;
};

static inline u32 tegra_qspi_readl(struct tegra_qspi *tqspi, unsigned long offset)
{
	return readl(tqspi->base + offset);
}

static inline void tegra_qspi_writel(struct tegra_qspi *tqspi, u32 value, unsigned long offset)
{
	writel(value, tqspi->base + offset);

	/*
	 * Read back a register to make sure that the register write has
	 * completed; the QSPI registers sit behind the posted-write PPSB
	 * bus (see also the flush in tegra_qspi_runtime_suspend()).
	 */
	if (offset != QSPI_TX_FIFO)
		readl(tqspi->base + QSPI_COMMAND1);
}

static void tegra_qspi_mask_clear_irq(struct tegra_qspi *tqspi)
{
	u32 value;

	/* write 1 to clear status register */
	value = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
	tegra_qspi_writel(tqspi, value, QSPI_TRANS_STATUS);

	value = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	if (!(value & QSPI_INTR_RDY_MASK)) {
		value |= (QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
		tegra_qspi_writel(tqspi, value, QSPI_INTR_MASK);
	}

	/* clear fifo status error if any */
	value = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if (value & QSPI_ERR)
		tegra_qspi_writel(tqspi, QSPI_ERR | QSPI_FIFO_ERROR, QSPI_FIFO_STATUS);
}

static unsigned int
tegra_qspi_calculate_curr_xfer_param(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int max_word, max_len, total_fifo_words;
	unsigned int remain_len = t->len - tqspi->cur_pos;
	unsigned int bits_per_word = t->bits_per_word;

	tqspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);

	/*
	 * The Tegra QSPI controller supports packed and unpacked mode
	 * transfers. Packed mode is used for transfers of 8, 16, or 32
	 * bits per word with a length of at least one 32-bit FIFO word
	 * (4 bytes); all other transfers use unpacked mode.
	 */
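	/*
	 * Worked example (illustrative): a 64-byte transfer at 8 bits
	 * per word runs packed with words_per_32bit = 4, giving
	 * curr_dma_words = 64 and total_fifo_words = 16. The same
	 * length at 24 bits per word runs unpacked with
	 * bytes_per_word = 3 and one packet per FIFO word, giving
	 * curr_dma_words = total_fifo_words = 22.
	 */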

	if ((bits_per_word == 8 || bits_per_word == 16 ||
	     bits_per_word == 32) && t->len > 3) {
		tqspi->is_packed = true;
		tqspi->words_per_32bit = 32 / bits_per_word;
	} else {
		tqspi->is_packed = false;
		tqspi->words_per_32bit = 1;
	}

	if (tqspi->is_packed) {
		max_len = min(remain_len, tqspi->max_buf_size);
		tqspi->curr_dma_words = max_len / tqspi->bytes_per_word;
		total_fifo_words = (max_len + 3) / 4;
	} else {
		max_word = (remain_len - 1) / tqspi->bytes_per_word + 1;
		max_word = min(max_word, tqspi->max_buf_size / 4);
		tqspi->curr_dma_words = max_word;
		total_fifo_words = max_word;
	}

	return total_fifo_words;
}

static unsigned int
tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int written_words, fifo_words_left, count;
	unsigned int len, tx_empty_count, max_n_32bit, i;
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u32 fifo_status;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	tx_empty_count = QSPI_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tqspi->is_packed) {
		fifo_words_left = tx_empty_count * tqspi->words_per_32bit;
		written_words = min(fifo_words_left, tqspi->curr_dma_words);
		len = written_words * tqspi->bytes_per_word;
		max_n_32bit = DIV_ROUND_UP(len, 4);
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; (i < 4) && len; i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += written_words * tqspi->bytes_per_word;
	} else {
		unsigned int write_bytes;
		u8 bytes_per_word = tqspi->bytes_per_word;

		max_n_32bit = min(tqspi->curr_dma_words, tx_empty_count);
		written_words = max_n_32bit;
		len = written_words * tqspi->bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		write_bytes = len;
		for (count = 0; count < max_n_32bit; count++) {
			u32 x = 0;

			for (i = 0; len && (i < min(4, bytes_per_word)); i++, len--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO);
		}

		tqspi->cur_tx_pos += write_bytes;
	}

	return written_words;
}

static unsigned int
tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len, rx_full_count, count, i;
	unsigned int read_words = 0;
	u32 fifo_status, x;

	fifo_status = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	rx_full_count = QSPI_RX_FIFO_FULL_COUNT(fifo_status);
	if (tqspi->is_packed) {
		len = tqspi->curr_dma_words * tqspi->bytes_per_word;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO);

			for (i = 0; len && (i < 4); i++, len--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		read_words += tqspi->curr_dma_words;
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		u8 bytes_per_word = tqspi->bytes_per_word;
		unsigned int read_bytes;

		len = rx_full_count * bytes_per_word;
		if (len > t->len - tqspi->cur_pos)
			len = t->len - tqspi->cur_pos;
		read_bytes = len;
		for (count = 0; count < rx_full_count; count++) {
			x = tegra_qspi_readl(tqspi, QSPI_RX_FIFO) & rx_mask;

			for (i = 0; len && (i < bytes_per_word); i++, len--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		read_words += rx_full_count;
		tqspi->cur_rx_pos += read_bytes;
	}

	return read_words;
}

static void
tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	/*
	 * In packed mode, each FIFO word may contain multiple packets,
	 * depending on bits per word, so all bytes in each FIFO word
	 * are valid.
	 *
	 * In unpacked mode, each FIFO word contains a single packet;
	 * any remaining bits in the word, as determined by bits per
	 * word, are invalid and ignored by the hardware.
	 */
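	/*
	 * Example (illustrative): with bits_per_word = 24, each
	 * tx_dma_buf word carries one 3-byte packet in its low 24 bits
	 * and the top byte is don't-care.
	 */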
	if (tqspi->is_packed) {
		tqspi->cur_tx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
		unsigned int i, count, consume, write_bytes;

		/*
		 * Fill tx_dma_buf with a single packet per word, based
		 * on bits per word, from the SPI core tx_buf.
		 */
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		write_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = 0;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				x |= (u32)(*tx_buf++) << (i * 8);
			tqspi->tx_dma_buf[count] = x;
		}

		tqspi->cur_tx_pos += write_bytes;
	}
}

static void
tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	if (tqspi->is_packed) {
		tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
	} else {
		unsigned char *rx_buf = t->rx_buf + tqspi->cur_rx_pos;
		u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
		unsigned int i, count, consume, read_bytes;

		/*
		 * Each FIFO word contains a single data packet. Skip
		 * the invalid bits in each word, based on bits per
		 * word, and pack the valid bytes into the SPI core
		 * rx_buf.
		 */
		consume = tqspi->curr_dma_words * tqspi->bytes_per_word;
		if (consume > t->len - tqspi->cur_pos)
			consume = t->len - tqspi->cur_pos;
		read_bytes = consume;
		for (count = 0; count < tqspi->curr_dma_words; count++) {
			u32 x = tqspi->rx_dma_buf[count] & rx_mask;

			for (i = 0; consume && (i < tqspi->bytes_per_word); i++, consume--)
				*rx_buf++ = (x >> (i * 8)) & 0xff;
		}

		tqspi->cur_rx_pos += read_bytes;
	}
}

static void tegra_qspi_dma_complete(void *args)
{
	struct completion *dma_complete = args;

	complete(dma_complete);
}

static int tegra_qspi_start_tx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t tx_dma_phys;

	reinit_completion(&tqspi->tx_dma_complete);

	if (tqspi->is_packed)
		tx_dma_phys = t->tx_dma;
	else
		tx_dma_phys = tqspi->tx_dma_phys;

	tqspi->tx_dma_desc = dmaengine_prep_slave_single(tqspi->tx_dma_chan, tx_dma_phys,
							 len, DMA_MEM_TO_DEV,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->tx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get TX descriptor\n");
		return -EIO;
	}

	tqspi->tx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->tx_dma_desc->callback_param = &tqspi->tx_dma_complete;
	dmaengine_submit(tqspi->tx_dma_desc);
	dma_async_issue_pending(tqspi->tx_dma_chan);

	return 0;
}

static int tegra_qspi_start_rx_dma(struct tegra_qspi *tqspi, struct spi_transfer *t, int len)
{
	dma_addr_t rx_dma_phys;

	reinit_completion(&tqspi->rx_dma_complete);

	if (tqspi->is_packed)
		rx_dma_phys = t->rx_dma;
	else
		rx_dma_phys = tqspi->rx_dma_phys;

	tqspi->rx_dma_desc = dmaengine_prep_slave_single(tqspi->rx_dma_chan, rx_dma_phys,
							 len, DMA_DEV_TO_MEM,
							 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tqspi->rx_dma_desc) {
		dev_err(tqspi->dev, "Unable to get RX descriptor\n");
		return -EIO;
	}

	tqspi->rx_dma_desc->callback = tegra_qspi_dma_complete;
	tqspi->rx_dma_desc->callback_param = &tqspi->rx_dma_complete;
	dmaengine_submit(tqspi->rx_dma_desc);
	dma_async_issue_pending(tqspi->rx_dma_chan);

	return 0;
}

static int tegra_qspi_flush_fifos(struct tegra_qspi *tqspi, bool atomic)
{
	void __iomem *addr = tqspi->base + QSPI_FIFO_STATUS;
	u32 val;

	val = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
	if ((val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY)
		return 0;

	val |= QSPI_RX_FIFO_FLUSH | QSPI_TX_FIFO_FLUSH;
	tegra_qspi_writel(tqspi, val, QSPI_FIFO_STATUS);

	if (!atomic)
		return readl_relaxed_poll_timeout(addr, val,
						  (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						  1000, 1000000);

	return readl_relaxed_poll_timeout_atomic(addr, val,
						 (val & QSPI_FIFO_EMPTY) == QSPI_FIFO_EMPTY,
						 1000, 1000000);
}

static void tegra_qspi_unmask_irq(struct tegra_qspi *tqspi)
{
	u32 intr_mask;

	intr_mask = tegra_qspi_readl(tqspi, QSPI_INTR_MASK);
	intr_mask &= ~(QSPI_INTR_RDY_MASK | QSPI_INTR_RX_TX_FIFO_ERR);
	tegra_qspi_writel(tqspi, intr_mask, QSPI_INTR_MASK);
}

static int tegra_qspi_dma_map_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	u8 *tx_buf = (u8 *)t->tx_buf + tqspi->cur_tx_pos;
	u8 *rx_buf = (u8 *)t->rx_buf + tqspi->cur_rx_pos;
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	if (t->tx_buf) {
		t->tx_dma = dma_map_single(tqspi->dev, (void *)tx_buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->tx_dma))
			return -ENOMEM;
	}

	if (t->rx_buf) {
		t->rx_dma = dma_map_single(tqspi->dev, (void *)rx_buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(tqspi->dev, t->rx_dma)) {
			dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	unsigned int len;

	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;

	if (t->tx_buf)
		dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
	if (t->rx_buf)
		dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}

static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
	struct dma_slave_config dma_sconfig = { 0 };
	dma_addr_t rx_dma_phys, tx_dma_phys;
	unsigned int len;
	u8 dma_burst;
	int ret = 0;
	u32 val;

	if (tqspi->is_packed) {
		ret = tegra_qspi_dma_map_xfer(tqspi, t);
		if (ret < 0)
			return ret;
	}

	val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
	tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(tqspi);

	if (tqspi->is_packed)
		len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
	else
		len = tqspi->curr_dma_words * 4;

	/* set attention level based on length of transfer */
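	/*
	 * Worked example (illustrative): a 20-byte transfer is not
	 * 16-byte aligned, so trigger level and burst stay at one word;
	 * 48 bytes selects trigger/burst 4; 64 bytes selects
	 * trigger/burst 8.
	 */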
	if (tqspi->soc_data->has_ext_dma) {
		val = 0;
		if (len & 0xf) {
			val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
			dma_burst = 1;
		} else if ((len >> 4) & 0x1) {
			val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
			dma_burst = 4;
		} else {
			val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
			dma_burst = 8;
		}

		tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
	}

	tqspi->dma_control_reg = val;

	dma_sconfig.device_fc = true;

	if (tqspi->cur_direction & DATA_DIR_TX) {
		if (tqspi->tx_dma_chan) {
			dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
			dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			dma_sconfig.dst_maxburst = dma_burst;
			ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
			if (ret < 0) {
				dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
				return ret;
			}

			tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
			ret = tegra_qspi_start_tx_dma(tqspi, t, len);
			if (ret < 0) {
665 dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
				return ret;
			}
		} else {
			if (tqspi->is_packed)
				tx_dma_phys = t->tx_dma;
			else
				tx_dma_phys = tqspi->tx_dma_phys;
			tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
			tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys),
					  QSPI_DMA_MEM_ADDRESS);
			tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff),
					  QSPI_DMA_HI_ADDRESS);
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		if (tqspi->rx_dma_chan) {
			dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
			dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			dma_sconfig.src_maxburst = dma_burst;
			ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
			if (ret < 0) {
				dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
				return ret;
			}

			ret = tegra_qspi_start_rx_dma(tqspi, t, len);
			if (ret < 0) {
				dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
				if (tqspi->cur_direction & DATA_DIR_TX)
					dmaengine_terminate_all(tqspi->tx_dma_chan);
				return ret;
			}
		} else {
			if (tqspi->is_packed)
				rx_dma_phys = t->rx_dma;
			else
				rx_dma_phys = tqspi->rx_dma_phys;

			tegra_qspi_writel(tqspi, lower_32_bits(rx_dma_phys),
					  QSPI_DMA_MEM_ADDRESS);
			tegra_qspi_writel(tqspi, (upper_32_bits(rx_dma_phys) & 0xff),
					  QSPI_DMA_HI_ADDRESS);
		}
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);

	tqspi->is_curr_dma_xfer = true;
	tqspi->dma_control_reg = val;
	val |= QSPI_DMA_EN;
	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);

	return ret;
}

static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct spi_transfer *t)
{
	u32 val;
	unsigned int cur_words;

	if (qspi->cur_direction & DATA_DIR_TX)
		cur_words = tegra_qspi_fill_tx_fifo_from_client_txbuf(qspi, t);
	else
		cur_words = qspi->curr_dma_words;

	val = QSPI_DMA_BLK_SET(cur_words - 1);
	tegra_qspi_writel(qspi, val, QSPI_DMA_BLK);

	tegra_qspi_unmask_irq(qspi);

	qspi->is_curr_dma_xfer = false;
	val = qspi->command1_reg;
	val |= QSPI_PIO;
	tegra_qspi_writel(qspi, val, QSPI_COMMAND1);

	return 0;
}

static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
	if (tqspi->tx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
		tqspi->tx_dma_buf = NULL;
	}

	if (tqspi->tx_dma_chan) {
		dma_release_channel(tqspi->tx_dma_chan);
		tqspi->tx_dma_chan = NULL;
	}

	if (tqspi->rx_dma_buf) {
		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
				  tqspi->rx_dma_buf, tqspi->rx_dma_phys);
		tqspi->rx_dma_buf = NULL;
	}

	if (tqspi->rx_dma_chan) {
		dma_release_channel(tqspi->rx_dma_chan);
		tqspi->rx_dma_chan = NULL;
	}
}

static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
{
	struct dma_chan *dma_chan;
	dma_addr_t dma_phys;
	u32 *dma_buf;
	int err;

	if (tqspi->soc_data->has_ext_dma) {
		dma_chan = dma_request_chan(tqspi->dev, "rx");
		if (IS_ERR(dma_chan)) {
			err = PTR_ERR(dma_chan);
			goto err_out;
		}

		tqspi->rx_dma_chan = dma_chan;

		dma_chan = dma_request_chan(tqspi->dev, "tx");
		if (IS_ERR(dma_chan)) {
			err = PTR_ERR(dma_chan);
			goto err_out;
		}

		tqspi->tx_dma_chan = dma_chan;
	} else {
		if (!device_iommu_mapped(tqspi->dev)) {
			dev_warn(tqspi->dev,
				 "IOMMU not enabled in device-tree, falling back to PIO mode\n");
			return 0;
		}
	}

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->rx_dma_buf = dma_buf;
	tqspi->rx_dma_phys = dma_phys;

	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
	if (!dma_buf) {
		err = -ENOMEM;
		goto err_out;
	}

	tqspi->tx_dma_buf = dma_buf;
	tqspi->tx_dma_phys = dma_phys;
	tqspi->use_dma = true;

	return 0;

err_out:
	tegra_qspi_deinit_dma(tqspi);

	if (err != -EPROBE_DEFER) {
		dev_err(tqspi->dev, "cannot use DMA: %d\n", err);
		dev_err(tqspi->dev, "falling back to PIO\n");
		return 0;
	}

	return err;
}

static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_transfer *t,
					 bool is_first_of_msg)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	u32 command1, command2, speed = t->speed_hz;
	u8 bits_per_word = t->bits_per_word;
	u32 tx_tap = 0, rx_tap = 0;
	unsigned long flags;
	int req_mode;

	if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
		clk_set_rate(tqspi->clk, speed);
		tqspi->cur_speed = speed;
	}

	spin_lock_irqsave(&tqspi->lock, flags);
	tqspi->cur_pos = 0;
	tqspi->cur_rx_pos = 0;
	tqspi->cur_tx_pos = 0;
	tqspi->curr_xfer = t;
	spin_unlock_irqrestore(&tqspi->lock, flags);

	if (is_first_of_msg) {
		tegra_qspi_mask_clear_irq(tqspi);

		command1 = tqspi->def_command1_reg;
		command1 |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);

		command1 &= ~QSPI_CONTROL_MODE_MASK;
		req_mode = spi->mode & 0x3;
		if (req_mode == SPI_MODE_3)
			command1 |= QSPI_CONTROL_MODE_3;
		else
			command1 |= QSPI_CONTROL_MODE_0;

		if (spi->mode & SPI_CS_HIGH)
			command1 |= QSPI_CS_SW_VAL;
		else
			command1 &= ~QSPI_CS_SW_VAL;
		tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

		if (cdata && cdata->tx_clk_tap_delay)
			tx_tap = cdata->tx_clk_tap_delay;

		if (cdata && cdata->rx_clk_tap_delay)
			rx_tap = cdata->rx_clk_tap_delay;

		command2 = QSPI_TX_TAP_DELAY(tx_tap) | QSPI_RX_TAP_DELAY(rx_tap);
		if (command2 != tqspi->def_command2_reg)
			tegra_qspi_writel(tqspi, command2, QSPI_COMMAND2);

	} else {
		command1 = tqspi->command1_reg;
		command1 &= ~QSPI_BIT_LENGTH(~0);
		command1 |= QSPI_BIT_LENGTH(bits_per_word - 1);
	}

	command1 &= ~QSPI_SDR_DDR_SEL;

	return command1;
}

static int tegra_qspi_start_transfer_one(struct spi_device *spi,
					 struct spi_transfer *t, u32 command1)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	unsigned int total_fifo_words;
	u8 bus_width = 0;
	int ret;

	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);

	command1 &= ~QSPI_PACKED;
	if (tqspi->is_packed)
		command1 |= QSPI_PACKED;
	tegra_qspi_writel(tqspi, command1, QSPI_COMMAND1);

	tqspi->cur_direction = 0;

	command1 &= ~(QSPI_TX_EN | QSPI_RX_EN);
	if (t->rx_buf) {
		command1 |= QSPI_RX_EN;
		tqspi->cur_direction |= DATA_DIR_RX;
		bus_width = t->rx_nbits;
	}

	if (t->tx_buf) {
		command1 |= QSPI_TX_EN;
		tqspi->cur_direction |= DATA_DIR_TX;
		bus_width = t->tx_nbits;
	}

	command1 &= ~QSPI_INTERFACE_WIDTH_MASK;

	if (bus_width == SPI_NBITS_QUAD)
		command1 |= QSPI_INTERFACE_WIDTH_QUAD;
	else if (bus_width == SPI_NBITS_DUAL)
		command1 |= QSPI_INTERFACE_WIDTH_DUAL;
	else
		command1 |= QSPI_INTERFACE_WIDTH_SINGLE;

	tqspi->command1_reg = command1;

	tegra_qspi_writel(tqspi, QSPI_NUM_DUMMY_CYCLE(tqspi->dummy_cycles), QSPI_MISC_REG);

	ret = tegra_qspi_flush_fifos(tqspi, false);
	if (ret < 0)
		return ret;

	if (tqspi->use_dma && total_fifo_words > QSPI_FIFO_DEPTH)
		ret = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		ret = tegra_qspi_start_cpu_based_transfer(tqspi, t);

	return ret;
}

static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
	struct tegra_qspi_client_data *cdata;
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);

	cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
	if (!cdata)
		return NULL;

	device_property_read_u32(&spi->dev, "nvidia,tx-clk-tap-delay",
				 &cdata->tx_clk_tap_delay);
	device_property_read_u32(&spi->dev, "nvidia,rx-clk-tap-delay",
				 &cdata->rx_clk_tap_delay);

	return cdata;
}

static int tegra_qspi_setup(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	struct tegra_qspi_client_data *cdata = spi->controller_data;
	unsigned long flags;
	u32 val;
	int ret;

	ret = pm_runtime_resume_and_get(tqspi->dev);
	if (ret < 0) {
		dev_err(tqspi->dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	if (!cdata) {
		cdata = tegra_qspi_parse_cdata_dt(spi);
		spi->controller_data = cdata;
	}
	spin_lock_irqsave(&tqspi->lock, flags);

	/* keep the default CS state inactive */
	val = tqspi->def_command1_reg;
	val |= QSPI_CS_SEL(spi_get_chipselect(spi, 0));
	if (spi->mode & SPI_CS_HIGH)
		val &= ~QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
	else
		val |= QSPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));

	tqspi->def_command1_reg = val;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);

	spin_unlock_irqrestore(&tqspi->lock, flags);

	pm_runtime_put(tqspi->dev);

	return 0;
}

static void tegra_qspi_dump_regs(struct tegra_qspi *tqspi)
{
	dev_dbg(tqspi->dev, "============ QSPI REGISTER DUMP ============\n");
	dev_dbg(tqspi->dev, "Command1: 0x%08x | Command2: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_COMMAND1),
		tegra_qspi_readl(tqspi, QSPI_COMMAND2));
	dev_dbg(tqspi->dev, "DMA_CTL: 0x%08x | DMA_BLK: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_DMA_CTL),
		tegra_qspi_readl(tqspi, QSPI_DMA_BLK));
	dev_dbg(tqspi->dev, "INTR_MASK: 0x%08x | MISC: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_INTR_MASK),
		tegra_qspi_readl(tqspi, QSPI_MISC_REG));
	dev_dbg(tqspi->dev, "TRANS_STAT: 0x%08x | FIFO_STATUS: 0x%08x\n",
		tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS),
		tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS));
}

static void tegra_qspi_reset(struct tegra_qspi *tqspi)
{
	if (device_reset(tqspi->dev) < 0) {
		dev_warn_once(tqspi->dev, "device reset failed\n");
		tegra_qspi_mask_clear_irq(tqspi);
	}
}

static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
{
	dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
	tegra_qspi_dump_regs(tqspi);
	tegra_qspi_flush_fifos(tqspi, true);
	tegra_qspi_reset(tqspi);
}

static void tegra_qspi_transfer_end(struct spi_device *spi)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(spi->controller);
	int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;

	if (cs_val)
		tqspi->command1_reg |= QSPI_CS_SW_VAL;
	else
		tqspi->command1_reg &= ~QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
}

static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi);
static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi);

/**
 * tegra_qspi_handle_timeout - Handle transfer timeout with hardware check
 * @tqspi: QSPI controller instance
 *
 * When a timeout occurs but hardware has completed the transfer (interrupt
 * was lost or delayed), manually trigger transfer completion processing.
 * This avoids failing transfers that actually succeeded.
 *
 * Returns: 0 if transfer was completed, -ETIMEDOUT if real timeout
 */
static int tegra_qspi_handle_timeout(struct tegra_qspi *tqspi)
{
	irqreturn_t ret;
	u32 status;

	/* Check if hardware actually completed the transfer */
	status = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
	if (!(status & QSPI_RDY))
		return -ETIMEDOUT;

	/*
	 * Hardware completed but interrupt was lost/delayed. Manually
	 * process the completion by calling the appropriate handler.
	 */
	dev_warn_ratelimited(tqspi->dev,
			     "QSPI interrupt timeout, but transfer complete\n");

	/* Clear the transfer status */
	status = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
	tegra_qspi_writel(tqspi, status, QSPI_TRANS_STATUS);

	/* Manually trigger completion handler */
	if (!tqspi->is_curr_dma_xfer)
		ret = handle_cpu_based_xfer(tqspi);
	else
		ret = handle_dma_based_xfer(tqspi);

	return (ret == IRQ_HANDLED) ? 0 : -EIO;
}

static u32 tegra_qspi_cmd_config(bool is_ddr, u8 bus_width, u8 len)
{
	u32 cmd_config = 0;

	/* Extract Command configuration and value */
	if (is_ddr)
		cmd_config |= QSPI_COMMAND_SDR_DDR;
	else
		cmd_config &= ~QSPI_COMMAND_SDR_DDR;

	cmd_config |= QSPI_COMMAND_X1_X2_X4(bus_width);
	cmd_config |= QSPI_COMMAND_SIZE_SET((len * 8) - 1);

	return cmd_config;
}

static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len)
{
	u32 addr_config = 0;

	if (is_ddr)
		addr_config |= QSPI_ADDRESS_SDR_DDR;
	else
		addr_config &= ~QSPI_ADDRESS_SDR_DDR;

	addr_config |= QSPI_ADDRESS_X1_X2_X4(bus_width);
	addr_config |= QSPI_ADDRESS_SIZE_SET((len * 8) - 1);

	return addr_config;
}
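
/*
 * Example (illustrative): tegra_qspi_cmd_config(false, SPI_NBITS_QUAD, 1)
 * encodes an X4 SDR command phase with QSPI_COMMAND_SIZE_SET(7), i.e. an
 * 8-bit opcode; the X1_X2_X4 field uses the same 0/1/2 width encoding as
 * QSPI_INTERFACE_WIDTH().
 */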

static void tegra_qspi_dma_stop(struct tegra_qspi *tqspi)
{
	u32 value;

	if ((tqspi->cur_direction & DATA_DIR_TX) && tqspi->tx_dma_chan)
		dmaengine_terminate_all(tqspi->tx_dma_chan);

	if ((tqspi->cur_direction & DATA_DIR_RX) && tqspi->rx_dma_chan)
		dmaengine_terminate_all(tqspi->rx_dma_chan);

	value = tegra_qspi_readl(tqspi, QSPI_DMA_CTL);
	value &= ~QSPI_DMA_EN;
	tegra_qspi_writel(tqspi, value, QSPI_DMA_CTL);
}

static void tegra_qspi_pio_stop(struct tegra_qspi *tqspi)
{
	u32 value;

	value = tegra_qspi_readl(tqspi, QSPI_COMMAND1);
	value &= ~QSPI_PIO;
	tegra_qspi_writel(tqspi, value, QSPI_COMMAND1);
}

static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
					struct spi_message *msg)
{
	bool is_first_msg = true;
	struct spi_transfer *xfer;
	struct spi_device *spi = msg->spi;
	u8 transfer_phase = 0;
	u32 cmd1 = 0;
	int ret = 0;
	u32 address_value = 0;
	u32 cmd_config = 0, addr_config = 0;
	u8 cmd_value = 0, val = 0;
	unsigned long flags;

	/* Enable Combined sequence mode */
	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
	if (spi->mode & SPI_TPM_HW_FLOW) {
		if (tqspi->soc_data->supports_tpm)
			val |= QSPI_TPM_WAIT_POLL_EN;
		else
			return -EIO;
	}
	val |= QSPI_CMB_SEQ_EN;
	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
	/* Process individual transfer list */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		switch (transfer_phase) {
		case CMD_TRANSFER:
			/* X1 SDR mode */
			cmd_config = tegra_qspi_cmd_config(false, xfer->tx_nbits,
							   xfer->len);
			cmd_value = *((const u8 *)(xfer->tx_buf));
			break;
		case ADDR_TRANSFER:
			/* X1 SDR mode */
			addr_config = tegra_qspi_addr_config(false, xfer->tx_nbits,
							     xfer->len);
			address_value = *((const u32 *)(xfer->tx_buf));
			break;
		case DUMMY_TRANSFER:
			if (xfer->dummy_data) {
				tqspi->dummy_cycles = xfer->len * 8 / xfer->tx_nbits;
				break;
			}
			transfer_phase++;
			fallthrough;
		case DATA_TRANSFER:
			/* Program Command, Address value in register */
			tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD);
			tegra_qspi_writel(tqspi, address_value,
					  QSPI_CMB_SEQ_ADDR);
			/* Program Command and Address config in register */
			tegra_qspi_writel(tqspi, cmd_config,
					  QSPI_CMB_SEQ_CMD_CFG);
			tegra_qspi_writel(tqspi, addr_config,
					  QSPI_CMB_SEQ_ADDR_CFG);

			reinit_completion(&tqspi->xfer_completion);
			cmd1 = tegra_qspi_setup_transfer_one(spi, xfer,
							     is_first_msg);
			ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);

			if (ret < 0) {
				dev_err(tqspi->dev, "Failed to start transfer-one: %d\n",
					ret);
				return ret;
			}

			is_first_msg = false;
			ret = wait_for_completion_timeout(&tqspi->xfer_completion,
							  QSPI_DMA_TIMEOUT);

			if (WARN_ON_ONCE(ret == 0)) {
				/*
				 * Check if hardware completed the transfer
				 * even though interrupt was lost or delayed.
				 * If so, process the completion and continue.
				 */
				ret = tegra_qspi_handle_timeout(tqspi);
				if (ret < 0) {
					/* Real timeout - clean up and fail */
					dev_err(tqspi->dev, "transfer timeout\n");

					/* Abort transfer by resetting pio/dma bit */
					if (tqspi->is_curr_dma_xfer)
						tegra_qspi_dma_stop(tqspi);
					else
						tegra_qspi_pio_stop(tqspi);

					/* Reset controller if timeout happens */
					tegra_qspi_reset(tqspi);

					ret = -EIO;
					goto exit;
				}
			}

			if (tqspi->tx_status || tqspi->rx_status) {
				dev_err(tqspi->dev, "QSPI Transfer failed\n");
				tqspi->tx_status = 0;
				tqspi->rx_status = 0;
				ret = -EIO;
				goto exit;
			}
			break;
		default:
			ret = -EINVAL;
			goto exit;
		}
		msg->actual_length += xfer->len;
		if (!xfer->cs_change && transfer_phase == DATA_TRANSFER) {
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
		}
		spin_lock_irqsave(&tqspi->lock, flags);
		tqspi->curr_xfer = NULL;
		spin_unlock_irqrestore(&tqspi->lock, flags);
		transfer_phase++;
	}
	ret = 0;

exit:
	spin_lock_irqsave(&tqspi->lock, flags);
	tqspi->curr_xfer = NULL;
	spin_unlock_irqrestore(&tqspi->lock, flags);
	msg->status = ret;

	return ret;
}

static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
					    struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *transfer;
	bool is_first_msg = true;
	int ret = 0, val = 0;
	unsigned long flags;

	msg->status = 0;
	msg->actual_length = 0;
	tqspi->tx_status = 0;
	tqspi->rx_status = 0;

	/* Disable Combined sequence mode */
	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
	val &= ~QSPI_CMB_SEQ_EN;
	if (tqspi->soc_data->supports_tpm)
		val &= ~QSPI_TPM_WAIT_POLL_EN;
	tegra_qspi_writel(tqspi, val, QSPI_GLOBAL_CONFIG);
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
		struct spi_transfer *xfer = transfer;
		u8 dummy_bytes = 0;
		u32 cmd1;

		tqspi->dummy_cycles = 0;
		/*
		 * The Tegra QSPI hardware can transfer dummy bytes after the
		 * actual data bytes, based on the dummy clock cycles
		 * programmed in the QSPI_MISC register. So, check whether the
		 * next transfer is a dummy-data transfer and, if so, program
		 * its dummy clock cycles along with the current transfer and
		 * skip the next transfer.
		 */
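		/*
		 * Example (illustrative): a fast-read op needing 8 dummy
		 * clocks at x1 is described as a 1-byte transfer with
		 * dummy_data set, giving dummy_cycles = 1 * 8 / 1 = 8.
		 */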
		if (!list_is_last(&xfer->transfer_list, &msg->transfers)) {
			struct spi_transfer *next_xfer;

			next_xfer = list_next_entry(xfer, transfer_list);
			if (next_xfer->dummy_data) {
				u32 dummy_cycles = next_xfer->len * 8 / next_xfer->tx_nbits;

				if (dummy_cycles <= QSPI_DUMMY_CYCLES_MAX) {
					tqspi->dummy_cycles = dummy_cycles;
					dummy_bytes = next_xfer->len;
					transfer = next_xfer;
				}
			}
		}

		reinit_completion(&tqspi->xfer_completion);

		cmd1 = tegra_qspi_setup_transfer_one(spi, xfer, is_first_msg);

		ret = tegra_qspi_start_transfer_one(spi, xfer, cmd1);
		if (ret < 0) {
			dev_err(tqspi->dev, "failed to start transfer: %d\n", ret);
			goto complete_xfer;
		}

		ret = wait_for_completion_timeout(&tqspi->xfer_completion,
						  QSPI_DMA_TIMEOUT);
		if (WARN_ON(ret == 0)) {
			/*
			 * Check if hardware completed the transfer even though
			 * interrupt was lost or delayed. If so, process the
			 * completion and continue.
			 */
			ret = tegra_qspi_handle_timeout(tqspi);
			if (ret < 0) {
				/* Real timeout - clean up and fail */
				dev_err(tqspi->dev, "transfer timeout\n");

				if (tqspi->is_curr_dma_xfer)
					tegra_qspi_dma_stop(tqspi);

				tegra_qspi_handle_error(tqspi);
				ret = -EIO;
				goto complete_xfer;
			}
		}

		if (tqspi->tx_status || tqspi->rx_status) {
			tegra_qspi_handle_error(tqspi);
			ret = -EIO;
			goto complete_xfer;
		}

		msg->actual_length += xfer->len + dummy_bytes;

complete_xfer:
		spin_lock_irqsave(&tqspi->lock, flags);
		tqspi->curr_xfer = NULL;
		spin_unlock_irqrestore(&tqspi->lock, flags);

		if (ret < 0) {
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
			goto exit;
		}

		if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
			/* de-activate CS after last transfer only when cs_change is not set */
			if (!xfer->cs_change) {
				tegra_qspi_transfer_end(spi);
				spi_transfer_delay_exec(xfer);
			}
		} else if (xfer->cs_change) {
			/* de-activate CS between transfers only when cs_change is set */
			tegra_qspi_transfer_end(spi);
			spi_transfer_delay_exec(xfer);
		}
	}

	ret = 0;
exit:
	msg->status = ret;

	return ret;
}

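/*
 * A valid combined sequence mirrors a typical flash op (an illustrative
 * reading of the checks below): a command transfer of at most 2 bytes,
 * an address transfer of 3 or 4 bytes, an optional dummy transfer and
 * finally the data transfer.
 */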
static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
					struct spi_message *msg)
{
	int transfer_count = 0;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		transfer_count++;
	}
	if (!tqspi->soc_data->cmb_xfer_capable)
		return false;
	if (transfer_count > 4 || transfer_count < 3)
		return false;
	xfer = list_first_entry(&msg->transfers, typeof(*xfer),
				transfer_list);
	if (xfer->len > 2)
		return false;
	xfer = list_next_entry(xfer, transfer_list);
	if (xfer->len > 4 || xfer->len < 3)
		return false;
	xfer = list_next_entry(xfer, transfer_list);
	if (transfer_count == 4) {
		if (xfer->dummy_data != 1)
			return false;
		if ((xfer->len * 8 / xfer->tx_nbits) > QSPI_DUMMY_CYCLES_MAX)
			return false;
		xfer = list_next_entry(xfer, transfer_list);
	}
	if (!tqspi->soc_data->has_ext_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
		return false;

	return true;
}

static int tegra_qspi_transfer_one_message(struct spi_controller *host,
					   struct spi_message *msg)
{
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
	int ret;

	if (tegra_qspi_validate_cmb_seq(tqspi, msg))
		ret = tegra_qspi_combined_seq_xfer(tqspi, msg);
	else
		ret = tegra_qspi_non_combined_seq_xfer(tqspi, msg);

	spi_finalize_current_message(host);

	return ret;
}

static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t;
	unsigned long flags;

	spin_lock_irqsave(&tqspi->lock, flags);
	t = tqspi->curr_xfer;

	if (!t) {
		spin_unlock_irqrestore(&tqspi->lock, flags);
		return IRQ_HANDLED;
	}

	if (tqspi->tx_status || tqspi->rx_status) {
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_read_rx_fifo_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	tegra_qspi_start_cpu_based_transfer(tqspi, t);
exit:
	tqspi->curr_xfer = NULL;
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
{
	struct spi_transfer *t;
	unsigned int total_fifo_words;
	unsigned long flags;
	long wait_status;
	int num_errors = 0;

	if (tqspi->cur_direction & DATA_DIR_TX) {
		if (tqspi->tx_status) {
			if (tqspi->tx_dma_chan)
				dmaengine_terminate_all(tqspi->tx_dma_chan);
			num_errors++;
		} else if (tqspi->tx_dma_chan) {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->tx_dma_chan);
				dev_err(tqspi->dev, "failed TX DMA transfer\n");
				num_errors++;
			}
		}
	}

	if (tqspi->cur_direction & DATA_DIR_RX) {
		if (tqspi->rx_status) {
			if (tqspi->rx_dma_chan)
				dmaengine_terminate_all(tqspi->rx_dma_chan);
			num_errors++;
		} else if (tqspi->rx_dma_chan) {
			wait_status = wait_for_completion_interruptible_timeout(
				&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
			if (wait_status <= 0) {
				dmaengine_terminate_all(tqspi->rx_dma_chan);
				dev_err(tqspi->dev, "failed RX DMA transfer\n");
				num_errors++;
			}
		}
	}

	spin_lock_irqsave(&tqspi->lock, flags);
	t = tqspi->curr_xfer;

	if (!t) {
		spin_unlock_irqrestore(&tqspi->lock, flags);
		return IRQ_HANDLED;
	}

	if (num_errors) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	if (tqspi->cur_direction & DATA_DIR_RX)
		tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(tqspi, t);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->cur_pos = tqspi->cur_tx_pos;
	else
		tqspi->cur_pos = tqspi->cur_rx_pos;

	if (tqspi->cur_pos == t->len) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		complete(&tqspi->xfer_completion);
		goto exit;
	}

	tegra_qspi_dma_unmap_xfer(tqspi, t);

	/* continue transfer in current message */
	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
	if (total_fifo_words > QSPI_FIFO_DEPTH)
		num_errors = tegra_qspi_start_dma_based_transfer(tqspi, t);
	else
		num_errors = tegra_qspi_start_cpu_based_transfer(tqspi, t);

exit:
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
{
	struct tegra_qspi *tqspi = context_data;
	unsigned long flags;
	u32 status;

	/*
	 * Read transfer status to check if interrupt was triggered by transfer
	 * completion
	 */
	status = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);

	/*
	 * Occasionally the IRQ thread takes a long time to wake up (usually
	 * when the CPU that it's running on is excessively busy) and we have
	 * already reached the timeout before and cleaned up the timed out
	 * transfer. Avoid any processing in that case and bail out early.
	 *
	 * If no transfer is in progress, check if this was a real interrupt
	 * that the timeout handler already processed, or a spurious one.
	 */
	spin_lock_irqsave(&tqspi->lock, flags);
	if (!tqspi->curr_xfer) {
		spin_unlock_irqrestore(&tqspi->lock, flags);
		/* Spurious interrupt - transfer not ready */
		if (!(status & QSPI_RDY))
			return IRQ_NONE;
		/* Real interrupt, already handled by timeout path */
		return IRQ_HANDLED;
	}

	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);

	if (tqspi->cur_direction & DATA_DIR_TX)
		tqspi->tx_status = tqspi->status_reg & (QSPI_TX_FIFO_UNF | QSPI_TX_FIFO_OVF);

	if (tqspi->cur_direction & DATA_DIR_RX)
		tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);

	tegra_qspi_mask_clear_irq(tqspi);
	spin_unlock_irqrestore(&tqspi->lock, flags);

	/*
	 * Lock is released here but handlers safely re-check curr_xfer under
	 * lock before dereferencing.
	 * DMA handler also needs to sleep in wait_for_completion_*(), which
	 * cannot be done while holding spinlock.
	 */
	if (!tqspi->is_curr_dma_xfer)
		return handle_cpu_based_xfer(tqspi);

	return handle_dma_based_xfer(tqspi);
}

static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
	.has_ext_dma = true,
	.cmb_xfer_capable = false,
	.supports_tpm = false,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
	.has_ext_dma = true,
	.cmb_xfer_capable = true,
	.supports_tpm = false,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
	.has_ext_dma = false,
	.cmb_xfer_capable = true,
	.supports_tpm = true,
	.cs_count = 1,
};

static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
	.has_ext_dma = true,
	.cmb_xfer_capable = true,
	.supports_tpm = true,
	.cs_count = 4,
};

static const struct of_device_id tegra_qspi_of_match[] = {
	{
		.compatible = "nvidia,tegra210-qspi",
		.data = &tegra210_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra186-qspi",
		.data = &tegra186_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra194-qspi",
		.data = &tegra186_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra234-qspi",
		.data = &tegra234_qspi_soc_data,
	}, {
		.compatible = "nvidia,tegra241-qspi",
		.data = &tegra241_qspi_soc_data,
	},
	{}
};

MODULE_DEVICE_TABLE(of, tegra_qspi_of_match);
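
/*
 * Minimal device tree sketch (illustrative): the compatible string comes
 * from the table above, the clock name from devm_clk_get(), the DMA names
 * from dma_request_chan() and the per-device tap-delay properties from
 * tegra_qspi_parse_cdata_dt(). The unit address and values are assumptions,
 * and reg, interrupts, clocks and dmas phandles are omitted:
 *
 *	spi@3270000 {
 *		compatible = "nvidia,tegra186-qspi";
 *		clock-names = "qspi";
 *		dma-names = "rx", "tx";
 *
 *		flash@0 {
 *			nvidia,tx-clk-tap-delay = <0>;
 *			nvidia,rx-clk-tap-delay = <0>;
 *		};
 *	};
 */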

#ifdef CONFIG_ACPI
static const struct acpi_device_id tegra_qspi_acpi_match[] = {
	{
		.id = "NVDA1213",
		.driver_data = (kernel_ulong_t)&tegra210_qspi_soc_data,
	}, {
		.id = "NVDA1313",
		.driver_data = (kernel_ulong_t)&tegra186_qspi_soc_data,
	}, {
		.id = "NVDA1413",
		.driver_data = (kernel_ulong_t)&tegra234_qspi_soc_data,
	}, {
		.id = "NVDA1513",
		.driver_data = (kernel_ulong_t)&tegra241_qspi_soc_data,
	},
	{}
};

MODULE_DEVICE_TABLE(acpi, tegra_qspi_acpi_match);
#endif

static int tegra_qspi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct tegra_qspi *tqspi;
	struct resource *r;
	int ret, qspi_irq;
	int bus_num;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*tqspi));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);
	tqspi = spi_controller_get_devdata(host);

	host->mode_bits = SPI_MODE_0 | SPI_MODE_3 | SPI_CS_HIGH |
			  SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
	host->flags = SPI_CONTROLLER_HALF_DUPLEX;
	host->setup = tegra_qspi_setup;
	host->transfer_one_message = tegra_qspi_transfer_one_message;
	host->num_chipselect = 1;
	host->auto_runtime_pm = true;

	bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
	if (bus_num >= 0)
		host->bus_num = bus_num;

	tqspi->host = host;
	tqspi->dev = &pdev->dev;
	spin_lock_init(&tqspi->lock);

	tqspi->soc_data = device_get_match_data(&pdev->dev);
	host->num_chipselect = tqspi->soc_data->cs_count;
	tqspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
	if (IS_ERR(tqspi->base))
		return PTR_ERR(tqspi->base);

	tqspi->phys = r->start;
	qspi_irq = platform_get_irq(pdev, 0);
	if (qspi_irq < 0)
		return qspi_irq;
	tqspi->irq = qspi_irq;

	if (!has_acpi_companion(tqspi->dev)) {
		tqspi->clk = devm_clk_get(&pdev->dev, "qspi");
		if (IS_ERR(tqspi->clk)) {
			ret = PTR_ERR(tqspi->clk);
			dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
			return ret;
		}
	}

	tqspi->max_buf_size = QSPI_FIFO_DEPTH << 2;
	tqspi->dma_buf_size = DEFAULT_QSPI_DMA_BUF_LEN;

	ret = tegra_qspi_init_dma(tqspi);
	if (ret < 0)
		return ret;

	if (tqspi->use_dma)
		tqspi->max_buf_size = tqspi->dma_buf_size;

	init_completion(&tqspi->tx_dma_complete);
	init_completion(&tqspi->rx_dma_complete);
	init_completion(&tqspi->xfer_completion);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get runtime PM: %d\n", ret);
		goto exit_pm_disable;
	}

	if (device_reset(tqspi->dev) < 0)
		dev_warn_once(tqspi->dev, "device reset failed\n");

	tqspi->def_command1_reg = QSPI_M_S | QSPI_CS_SW_HW | QSPI_CS_SW_VAL;
	tegra_qspi_writel(tqspi, tqspi->def_command1_reg, QSPI_COMMAND1);
	tqspi->spi_cs_timing1 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING1);
	tqspi->spi_cs_timing2 = tegra_qspi_readl(tqspi, QSPI_CS_TIMING2);
	tqspi->def_command2_reg = tegra_qspi_readl(tqspi, QSPI_COMMAND2);

	pm_runtime_put(&pdev->dev);

	ret = request_threaded_irq(tqspi->irq, NULL,
				   tegra_qspi_isr_thread, IRQF_ONESHOT,
				   dev_name(&pdev->dev), tqspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", tqspi->irq, ret);
		goto exit_pm_disable;
	}

	ret = spi_register_controller(host);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register host: %d\n", ret);
		goto exit_free_irq;
	}

	return 0;

exit_free_irq:
	free_irq(qspi_irq, tqspi);
exit_pm_disable:
	pm_runtime_force_suspend(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);
	return ret;
}

static void tegra_qspi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);

	spi_unregister_controller(host);
	free_irq(tqspi->irq, tqspi);
	pm_runtime_force_suspend(&pdev->dev);
	tegra_qspi_deinit_dma(tqspi);
}

static int __maybe_unused tegra_qspi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);

	return spi_controller_suspend(host);
}

static int __maybe_unused tegra_qspi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
	tegra_qspi_writel(tqspi, tqspi->def_command2_reg, QSPI_COMMAND2);
	pm_runtime_put(dev);

	return spi_controller_resume(host);
}

static int __maybe_unused tegra_qspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);

	/* Runtime PM is disabled with ACPI */
	if (has_acpi_companion(tqspi->dev))
		return 0;
	/* flush all writes in the PPSB queue by reading back */
	tegra_qspi_readl(tqspi, QSPI_COMMAND1);

	clk_disable_unprepare(tqspi->clk);

	return 0;
}

static int __maybe_unused tegra_qspi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct tegra_qspi *tqspi = spi_controller_get_devdata(host);
	int ret;

	/* Runtime PM is disabled with ACPI */
	if (has_acpi_companion(tqspi->dev))
		return 0;
	ret = clk_prepare_enable(tqspi->clk);
	if (ret < 0)
		dev_err(tqspi->dev, "failed to enable clock: %d\n", ret);

	return ret;
}

static const struct dev_pm_ops tegra_qspi_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_qspi_runtime_suspend, tegra_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_qspi_suspend, tegra_qspi_resume)
};

static struct platform_driver tegra_qspi_driver = {
	.driver = {
		.name = "tegra-qspi",
		.pm = &tegra_qspi_pm_ops,
		.of_match_table = tegra_qspi_of_match,
		.acpi_match_table = ACPI_PTR(tegra_qspi_acpi_match),
	},
	.probe = tegra_qspi_probe,
	.remove = tegra_qspi_remove,
};
module_platform_driver(tegra_qspi_driver);

MODULE_ALIAS("platform:qspi-tegra");
MODULE_DESCRIPTION("NVIDIA Tegra QSPI Controller Driver");
MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
MODULE_LICENSE("GPL v2");