// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom-gpi-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/soc/qcom/geni-se.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

/* SPI SE specific registers and respective register fields */
#define SE_SPI_CPHA		0x224
#define CPHA			BIT(0)

#define SE_SPI_LOOPBACK		0x22c
#define LOOPBACK_ENABLE		0x1
#define NORMAL_MODE		0x0
#define LOOPBACK_MSK		GENMASK(1, 0)

#define SE_SPI_CPOL		0x230
#define CPOL			BIT(2)

#define SE_SPI_DEMUX_OUTPUT_INV	0x24c
#define CS_DEMUX_OUTPUT_INV_MSK	GENMASK(3, 0)

#define SE_SPI_DEMUX_SEL	0x250
#define CS_DEMUX_OUTPUT_SEL	GENMASK(3, 0)

#define SE_SPI_TRANS_CFG	0x25c
#define CS_TOGGLE		BIT(1)

#define SE_SPI_WORD_LEN		0x268
#define WORD_LEN_MSK		GENMASK(9, 0)
#define MIN_WORD_LEN		4

#define SE_SPI_TX_TRANS_LEN	0x26c
#define SE_SPI_RX_TRANS_LEN	0x270
#define TRANS_LEN_MSK		GENMASK(23, 0)

#define SE_SPI_PRE_POST_CMD_DLY	0x274

#define SE_SPI_DELAY_COUNTERS	0x278
#define SPI_INTER_WORDS_DELAY_MSK	GENMASK(9, 0)
#define SPI_CS_CLK_DELAY_MSK		GENMASK(19, 10)
#define SPI_CS_CLK_DELAY_SHFT		10

#define SE_SPI_SLAVE_EN		(0x2BC)
#define SPI_SLAVE_EN		BIT(0)

/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY		1
#define SPI_RX_ONLY		2
#define SPI_TX_RX		7
#define SPI_CS_ASSERT		8
#define SPI_CS_DEASSERT		9
#define SPI_SCK_ONLY		10
/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY	BIT(0)
#define TIMESTAMP_BEFORE	BIT(1)
#define FRAGMENTATION		BIT(2)
#define TIMESTAMP_AFTER		BIT(3)
#define POST_CMD_DELAY		BIT(4)

#define GSI_LOOPBACK_EN		BIT(0)
#define GSI_CS_TOGGLE		BIT(3)
#define GSI_CPHA		BIT(4)
#define GSI_CPOL		BIT(5)

struct spi_geni_master {
	struct geni_se se;
	struct device *dev;
	u32 tx_fifo_depth;
	u32 fifo_width_bits;
	u32 tx_wm;
	u32 last_mode;
	u8 last_cs;
	unsigned long cur_speed_hz;
	unsigned long cur_sclk_hz;
	unsigned int cur_bits_per_word;
	unsigned int tx_rem_bytes;
	unsigned int rx_rem_bytes;
	const struct spi_transfer *cur_xfer;
	struct completion cs_done;
	struct completion cancel_done;
	struct completion abort_done;
	struct completion tx_reset_done;
	struct completion rx_reset_done;
	unsigned int oversampling;
	spinlock_t lock;
	int irq;
	bool cs_flag;
	bool abort_failed;
	struct dma_chan *tx;
	struct dma_chan *rx;
	int cur_xfer_mode;
};

static void spi_slv_setup(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;

	writel(SPI_SLAVE_EN, se->base + SE_SPI_SLAVE_EN);
	writel(GENI_IO_MUX_0_EN, se->base + GENI_OUTPUT_CTRL);
	writel(START_TRIGGER, se->base + SE_GENI_CFG_SEQ_START);
	dev_dbg(mas->dev, "spi slave setup done\n");
}

static int get_spi_clk_cfg(unsigned int speed_hz,
			   struct spi_geni_master *mas,
			   unsigned int *clk_idx,
			   unsigned int *clk_div)
{
	unsigned long sclk_freq;
	unsigned int actual_hz;
	int ret;

	ret = geni_se_clk_freq_match(&mas->se,
				     speed_hz * mas->oversampling,
				     clk_idx, &sclk_freq, false);
	if (ret) {
		dev_err(mas->dev, "Failed(%d) to find src clk for %dHz\n",
			ret, speed_hz);
		return ret;
	}

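	/*
	 * The serial clock runs at oversampling * SPI rate, so round the
	 * divider up: the resulting SPI clock may be slightly slower than
	 * speed_hz but will never exceed it.
	 */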
	*clk_div = DIV_ROUND_UP(sclk_freq, mas->oversampling * speed_hz);
	actual_hz = sclk_freq / (mas->oversampling * *clk_div);

	dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
		actual_hz, sclk_freq, *clk_idx, *clk_div);
	ret = dev_pm_opp_set_rate(mas->dev, sclk_freq);
	if (ret)
		dev_err(mas->dev, "dev_pm_opp_set_rate failed %d\n", ret);
	else
		mas->cur_sclk_hz = sclk_freq;

	return ret;
}

static void handle_se_timeout(struct spi_controller *spi)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	unsigned long time_left;
	struct geni_se *se = &mas->se;
	const struct spi_transfer *xfer;

	spin_lock_irq(&mas->lock);
	if (mas->cur_xfer_mode == GENI_SE_FIFO)
		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);

	xfer = mas->cur_xfer;
	mas->cur_xfer = NULL;

	/* The controller doesn't support the Cancel command in target mode */
	if (!spi->target) {
		reinit_completion(&mas->cancel_done);
		geni_se_cancel_m_cmd(se);

		spin_unlock_irq(&mas->lock);

		time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
		if (time_left)
			goto reset_if_dma;

		spin_lock_irq(&mas->lock);
	}

	reinit_completion(&mas->abort_done);
	geni_se_abort_m_cmd(se);
	spin_unlock_irq(&mas->lock);

	time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
	if (!time_left) {
		dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");

		/*
		 * No need for a lock since SPI core has a lock and we never
		 * access this from an interrupt.
		 */
		mas->abort_failed = true;
	}

reset_if_dma:
	if (mas->cur_xfer_mode == GENI_SE_DMA) {
		if (xfer) {
			if (xfer->tx_buf) {
				spin_lock_irq(&mas->lock);
				reinit_completion(&mas->tx_reset_done);
				writel(1, se->base + SE_DMA_TX_FSM_RST);
				spin_unlock_irq(&mas->lock);
				time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
				if (!time_left)
					dev_err(mas->dev, "DMA TX RESET failed\n");
			}
			if (xfer->rx_buf) {
				spin_lock_irq(&mas->lock);
				reinit_completion(&mas->rx_reset_done);
				writel(1, se->base + SE_DMA_RX_FSM_RST);
				spin_unlock_irq(&mas->lock);
				time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
				if (!time_left)
					dev_err(mas->dev, "DMA RX RESET failed\n");
			}
		} else {
			/*
			 * This can happen if a timeout happened and we had to
			 * wait for the lock in this function because the ISR
			 * was holding the lock and handling transfer
			 * completion at that time.
			 */
			dev_warn(mas->dev, "Cancel/Abort on completed SPI transfer\n");
		}
	}
}

static void handle_gpi_timeout(struct spi_controller *spi)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);

	dmaengine_terminate_sync(mas->tx);
	dmaengine_terminate_sync(mas->rx);
}

static void spi_geni_handle_err(struct spi_controller *spi, struct spi_message *msg)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);

	switch (mas->cur_xfer_mode) {
	case GENI_SE_FIFO:
	case GENI_SE_DMA:
		handle_se_timeout(spi);
		break;
	case GENI_GPI_DMA:
		handle_gpi_timeout(spi);
		break;
	default:
		dev_err(mas->dev, "Abort on Mode:%d not supported", mas->cur_xfer_mode);
	}
}

static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	u32 m_irq, m_irq_en;

	if (!mas->abort_failed)
		return false;

	/*
	 * The only known case where a transfer times out and then a cancel
	 * times out then an abort times out is if something is blocking our
	 * interrupt handler from running. Avoid starting any new transfers
	 * until that sorts itself out.
	 */
	spin_lock_irq(&mas->lock);
	m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
	m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
	spin_unlock_irq(&mas->lock);

	if (m_irq & m_irq_en) {
		dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
			m_irq & m_irq_en);
		return true;
	}

	/*
	 * If we're here the problem resolved itself so no need to check more
	 * on future transfers.
	 */
	mas->abort_failed = false;

	return false;
}

static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
			       unsigned int bits_per_word)
{
	unsigned int pack_words;
	bool msb_first = !(mode & SPI_LSB_FIRST);
	struct geni_se *se = &mas->se;
	u32 word_len;

	/*
	 * If bits_per_word isn't a byte aligned value, set the packing to be
	 * 1 SPI word per FIFO word.
	 */
	if (!(mas->fifo_width_bits % bits_per_word))
		pack_words = mas->fifo_width_bits / bits_per_word;
	else
		pack_words = 1;
	geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
			       true, true);
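	/* The WORD_LEN register encodes the word size minus MIN_WORD_LEN (4) */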
	word_len = (bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK;
	writel(word_len, se->base + SE_SPI_WORD_LEN);
}

static int geni_spi_set_clock_and_bw(struct spi_geni_master *mas,
				     unsigned long clk_hz)
{
	u32 clk_sel, m_clk_cfg, idx, div;
	struct geni_se *se = &mas->se;
	int ret;

	if (clk_hz == mas->cur_speed_hz)
		return 0;

	ret = get_spi_clk_cfg(clk_hz, mas, &idx, &div);
	if (ret) {
		dev_err(mas->dev, "Err setting clk to %lu: %d\n", clk_hz, ret);
		return ret;
	}

	/*
	 * The SPI core clock gets configured with the requested frequency
	 * or the closest achievable frequency. For that reason the
	 * requested frequency is stored in cur_speed_hz and reused on
	 * subsequent transfers instead of calling the clk_get_rate() API.
	 */
	mas->cur_speed_hz = clk_hz;

	clk_sel = idx & CLK_SEL_MSK;
	m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
	writel(clk_sel, se->base + SE_GENI_CLK_SEL);
	writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);

	/* Set BW quota for CPU as driver supports FIFO mode only. */
	se->icc_paths[CPU_TO_GENI].avg_bw = Bps_to_icc(mas->cur_speed_hz);
	ret = geni_icc_set_bw(se);
	if (ret)
		return ret;

	return 0;
}

static int setup_fifo_params(struct spi_device *spi_slv,
			     struct spi_controller *spi)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	struct geni_se *se = &mas->se;
	u8 chipselect = spi_get_chipselect(spi_slv, 0);
	bool cs_changed = (mas->last_cs != chipselect);
	u32 mode_changed = mas->last_mode ^ spi_slv->mode;

	mas->last_cs = chipselect;
	mas->last_mode = spi_slv->mode;

	if (mode_changed & SPI_LSB_FIRST)
		mas->cur_bits_per_word = 0; /* force next setup_se_xfer to call spi_setup_word_len */
	if (mode_changed & SPI_LOOP)
		writel((spi_slv->mode & SPI_LOOP) ? LOOPBACK_ENABLE : 0, se->base + SE_SPI_LOOPBACK);
	if (cs_changed)
		writel(chipselect, se->base + SE_SPI_DEMUX_SEL);
	if (mode_changed & SPI_CPHA)
		writel((spi_slv->mode & SPI_CPHA) ? CPHA : 0, se->base + SE_SPI_CPHA);
	if (mode_changed & SPI_CPOL)
		writel((spi_slv->mode & SPI_CPOL) ? CPOL : 0, se->base + SE_SPI_CPOL);
	if ((mode_changed & SPI_CS_HIGH) || (cs_changed && (spi_slv->mode & SPI_CS_HIGH)))
		writel((spi_slv->mode & SPI_CS_HIGH) ? BIT(chipselect) : 0, se->base + SE_SPI_DEMUX_OUTPUT_INV);

	return 0;
}

static void
spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
{
	struct spi_controller *spi = cb;

	spi->cur_msg->status = -EIO;
	if (result->result != DMA_TRANS_NOERROR) {
		dev_err(&spi->dev, "DMA txn failed: %d\n", result->result);
		spi_finalize_current_transfer(spi);
		return;
	}

	if (!result->residue) {
		spi->cur_msg->status = 0;
		dev_dbg(&spi->dev, "DMA txn completed\n");
	} else {
		dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue);
	}

	spi_finalize_current_transfer(spi);
}

static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
			  struct spi_device *spi_slv, struct spi_controller *spi)
{
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	struct dma_slave_config config = {};
	struct gpi_spi_config peripheral = {};
	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
	int ret;

	config.peripheral_config = &peripheral;
	config.peripheral_size = sizeof(peripheral);
	peripheral.set_config = true;

	if (xfer->bits_per_word != mas->cur_bits_per_word ||
	    xfer->speed_hz != mas->cur_speed_hz) {
		mas->cur_bits_per_word = xfer->bits_per_word;
		mas->cur_speed_hz = xfer->speed_hz;
	}

	if (xfer->tx_buf && xfer->rx_buf) {
		peripheral.cmd = SPI_DUPLEX;
	} else if (xfer->tx_buf) {
		peripheral.cmd = SPI_TX;
		peripheral.rx_len = 0;
	} else if (xfer->rx_buf) {
		peripheral.cmd = SPI_RX;
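		/*
		 * rx_len counts SPI words, not bytes: word sizes that are a
		 * multiple of MIN_WORD_LEN are derived from the total bit
		 * count, while other sizes occupy a rounded-up whole number
		 * of bytes per word.
		 */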
		if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
			peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
		} else {
			int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;

			peripheral.rx_len = (xfer->len / bytes_per_word);
		}
	}

	peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP);
	peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL);
	peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA);
	peripheral.cs = spi_get_chipselect(spi_slv, 0);
	peripheral.pack_en = true;
	peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;

	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
			      &peripheral.clk_src, &peripheral.clk_div);
	if (ret) {
		dev_err(mas->dev, "Err in get_spi_clk_cfg() :%d\n", ret);
		return ret;
	}

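	/*
	 * When no chip-select change is requested, flag every transfer
	 * except the message's last as fragmented, i.e. part of an ongoing
	 * command chain.
	 */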
	if (!xfer->cs_change) {
		if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
			peripheral.fragmentation = FRAGMENTATION;
	}

	if (peripheral.cmd & SPI_RX) {
		dmaengine_slave_config(mas->rx, &config);
		rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
						  DMA_DEV_TO_MEM, flags);
		if (!rx_desc) {
			dev_err(mas->dev, "Err setting up rx desc\n");
			return -EIO;
		}
	}

	/*
	 * Always prepare the TX descriptor: even for RX-only transfers
	 * (i.e. tx_buf is NULL), the GSI spec requires TX to be set up.
	 */
	dmaengine_slave_config(mas->tx, &config);
	tx_desc = dmaengine_prep_slave_sg(mas->tx, xfer->tx_sg.sgl, xfer->tx_sg.nents,
					  DMA_MEM_TO_DEV, flags);
	if (!tx_desc) {
		dev_err(mas->dev, "Err setting up tx desc\n");
		return -EIO;
	}

	tx_desc->callback_result = spi_gsi_callback_result;
	tx_desc->callback_param = spi;

	if (peripheral.cmd & SPI_RX)
		dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);

	if (peripheral.cmd & SPI_RX)
		dma_async_issue_pending(mas->rx);

	dma_async_issue_pending(mas->tx);
	return 1;
}

static u32 get_xfer_len_in_words(struct spi_transfer *xfer,
				 struct spi_geni_master *mas)
{
	u32 len;

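	/*
	 * Convert the transfer's byte count into SPI words: a bits_per_word
	 * that is a multiple of MIN_WORD_LEN is derived from the total bit
	 * count; otherwise each word occupies a rounded-up whole number of
	 * bytes.
	 */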
	if (!(xfer->bits_per_word % MIN_WORD_LEN))
		len = xfer->len * BITS_PER_BYTE / xfer->bits_per_word;
	else
		len = xfer->len / (xfer->bits_per_word / BITS_PER_BYTE + 1);
	len &= TRANS_LEN_MSK;

	return len;
}

static bool geni_can_dma(struct spi_controller *ctlr,
			 struct spi_device *slv, struct spi_transfer *xfer)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(slv->controller);
	u32 len, fifo_size;

	if (mas->cur_xfer_mode == GENI_GPI_DMA)
		return true;

	/* Set SE DMA mode for SPI target. */
	if (ctlr->target)
		return true;

	len = get_xfer_len_in_words(xfer, mas);
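	/*
	 * FIFO capacity measured in SPI words of this transfer's size;
	 * anything that doesn't fit is better served by DMA.
	 */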
	fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / xfer->bits_per_word;

	return len > fifo_size;
}

static int spi_geni_prepare_message(struct spi_controller *spi,
				    struct spi_message *spi_msg)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	int ret;

	switch (mas->cur_xfer_mode) {
	case GENI_SE_FIFO:
	case GENI_SE_DMA:
		if (spi_geni_is_abort_still_pending(mas))
			return -EBUSY;
		ret = setup_fifo_params(spi_msg->spi, spi);
		if (ret)
			dev_err(mas->dev, "Couldn't select mode %d\n", ret);
		return ret;

	case GENI_GPI_DMA:
		/* nothing to do for GPI DMA */
		return 0;
	}

	dev_err(mas->dev, "Mode not supported %d", mas->cur_xfer_mode);
	return -EINVAL;
}

static void spi_geni_release_dma_chan(void *data)
{
	struct spi_geni_master *mas = data;

	if (mas->rx) {
		dma_release_channel(mas->rx);
		mas->rx = NULL;
	}

	if (mas->tx) {
		dma_release_channel(mas->tx);
		mas->tx = NULL;
	}
}

static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
{
	int ret;

	mas->tx = dma_request_chan(mas->dev, "tx");
	if (IS_ERR(mas->tx)) {
		ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx),
				    "Failed to get tx DMA ch\n");
		goto err_tx;
	}

	mas->rx = dma_request_chan(mas->dev, "rx");
	if (IS_ERR(mas->rx)) {
		ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx),
				    "Failed to get rx DMA ch\n");
		goto err_rx;
	}

	ret = devm_add_action_or_reset(mas->dev, spi_geni_release_dma_chan, mas);
	if (ret) {
		dev_err(mas->dev, "Unable to add action.\n");
		return ret;
	}

	return 0;

err_rx:
	mas->rx = NULL;
	dma_release_channel(mas->tx);
err_tx:
	mas->tx = NULL;
	return ret;
}

static int spi_geni_init(struct spi_geni_master *mas)
{
	struct spi_controller *spi = dev_get_drvdata(mas->dev);
	struct geni_se *se = &mas->se;
	unsigned int proto, major, minor, ver;
	u32 spi_tx_cfg, fifo_disable;
	int ret = -ENXIO;

	pm_runtime_get_sync(mas->dev);

	proto = geni_se_read_proto(se);

	if (spi->target) {
		if (proto != GENI_SE_SPI_SLAVE) {
			dev_err(mas->dev, "Invalid proto %d\n", proto);
			goto out_pm;
		}
		spi_slv_setup(mas);
	} else if (proto == GENI_SE_INVALID_PROTO) {
		ret = geni_load_se_firmware(se, GENI_SE_SPI);
		if (ret) {
			dev_err(mas->dev, "spi master firmware load failed ret: %d\n", ret);
			goto out_pm;
		}
	} else if (proto != GENI_SE_SPI) {
		dev_err(mas->dev, "Invalid proto %d\n", proto);
		goto out_pm;
	}
	mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);

	/* The Tx and Rx FIFOs are the same width */
	mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);

	/*
	 * Hardware programming guide suggests to configure
	 * RX FIFO RFR level to fifo_depth-2.
	 */
	geni_se_init(se, mas->tx_fifo_depth - 3, mas->tx_fifo_depth - 2);
	/* Transmit an entire FIFO worth of data per IRQ */
	mas->tx_wm = 1;
	ver = geni_se_get_qup_hw_version(se);
	major = GENI_SE_VERSION_MAJOR(ver);
	minor = GENI_SE_VERSION_MINOR(ver);

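	/* QUP hardware v1.0 requires 2x oversampling of the serial clock */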
	if (major == 1 && minor == 0)
		mas->oversampling = 2;
	else
		mas->oversampling = 1;

	fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
	switch (fifo_disable) {
	case 1:
		ret = spi_geni_grab_gpi_chan(mas);
		if (!ret) { /* success case */
			mas->cur_xfer_mode = GENI_GPI_DMA;
			geni_se_select_mode(se, GENI_GPI_DMA);
			dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
			break;
		} else if (ret == -EPROBE_DEFER) {
			goto out_pm;
		}
		/*
		 * If we failed to get a GPI DMA channel we can still use
		 * FIFO mode, so fall through.
		 */
		dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
		fallthrough;

	case 0:
		mas->cur_xfer_mode = GENI_SE_FIFO;
		geni_se_select_mode(se, GENI_SE_FIFO);
		/* setup_fifo_params assumes that these registers start with a zero value */
		writel(0, se->base + SE_SPI_LOOPBACK);
		writel(0, se->base + SE_SPI_DEMUX_SEL);
		writel(0, se->base + SE_SPI_CPHA);
		writel(0, se->base + SE_SPI_CPOL);
		writel(0, se->base + SE_SPI_DEMUX_OUTPUT_INV);
		ret = 0;
		break;
	}

	/* We never control CS manually */
	if (!spi->target) {
		spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
		spi_tx_cfg &= ~CS_TOGGLE;
		writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
	}

out_pm:
	pm_runtime_put(mas->dev);
	return ret;
}

static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
{
	/*
	 * Calculate how many bytes we'll put in each FIFO word. If the
	 * transfer words don't pack cleanly into a FIFO word we'll just put
	 * one transfer word in each FIFO word. If they do pack we'll pack 'em.
	 */
	if (mas->fifo_width_bits % mas->cur_bits_per_word)
		return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
						       BITS_PER_BYTE));

	return mas->fifo_width_bits / BITS_PER_BYTE;
}

static bool geni_spi_handle_tx(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	unsigned int max_bytes;
	const u8 *tx_buf;
	unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
	unsigned int i = 0;

	/* Stop the watermark IRQ if nothing to send */
	if (!mas->cur_xfer) {
		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
		return false;
	}

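	/*
	 * Leave tx_wm words of headroom so the FIFO is never overrun; write
	 * at most what remains of the transfer.
	 */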
	max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
	if (mas->tx_rem_bytes < max_bytes)
		max_bytes = mas->tx_rem_bytes;

	tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
	while (i < max_bytes) {
		unsigned int j;
		unsigned int bytes_to_write;
		u32 fifo_word = 0;
		u8 *fifo_byte = (u8 *)&fifo_word;

		bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
		for (j = 0; j < bytes_to_write; j++)
			fifo_byte[j] = tx_buf[i++];
		iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
	}
	mas->tx_rem_bytes -= max_bytes;
	if (!mas->tx_rem_bytes) {
		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
		return false;
	}
	return true;
}

static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	u32 rx_fifo_status;
	unsigned int rx_bytes;
	unsigned int rx_last_byte_valid;
	u8 *rx_buf;
	unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
	unsigned int i = 0;

	rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
	rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
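	/*
	 * The word count assumes full FIFO words; if RX_LAST is set the
	 * final word may contain fewer valid bytes, so trim the count.
	 */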
	if (rx_fifo_status & RX_LAST) {
		rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
		rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
		if (rx_last_byte_valid && rx_last_byte_valid < 4)
			rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
	}

	/* Clear out the FIFO and bail if nowhere to put it */
	if (!mas->cur_xfer) {
		for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
			readl(se->base + SE_GENI_RX_FIFOn);
		return;
	}

	if (mas->rx_rem_bytes < rx_bytes)
		rx_bytes = mas->rx_rem_bytes;

	rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
	while (i < rx_bytes) {
		u32 fifo_word = 0;
		u8 *fifo_byte = (u8 *)&fifo_word;
		unsigned int bytes_to_read;
		unsigned int j;

		bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
		ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
		for (j = 0; j < bytes_to_read; j++)
			rx_buf[i++] = fifo_byte[j];
	}
	mas->rx_rem_bytes -= rx_bytes;
}

static int setup_se_xfer(struct spi_transfer *xfer,
			 struct spi_geni_master *mas,
			 u16 mode, struct spi_controller *spi)
{
	u32 m_cmd = 0;
	u32 m_params = 0;
	u32 len;
	struct geni_se *se = &mas->se;
	int ret;

	/*
	 * Ensure that our interrupt handler isn't still running from some
	 * prior command before we start messing with the hardware behind
	 * its back. We don't need to _keep_ the lock here since we're only
	 * worried about racing with our interrupt handler. The SPI core
	 * already handles making sure that we're not trying to do two
	 * transfers at once or setting a chip select and doing a transfer
	 * concurrently.
	 *
	 * NOTE: we actually _can't_ hold the lock here because possibly we
	 * might call clk_set_rate() which needs to be able to sleep.
	 */
	spin_lock_irq(&mas->lock);
	spin_unlock_irq(&mas->lock);

	if (xfer->bits_per_word != mas->cur_bits_per_word) {
		spi_setup_word_len(mas, mode, xfer->bits_per_word);
		mas->cur_bits_per_word = xfer->bits_per_word;
	}

	/* Speed and bits per word can be overridden per transfer */
	ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
	if (ret)
		return ret;

	mas->tx_rem_bytes = 0;
	mas->rx_rem_bytes = 0;

	len = get_xfer_len_in_words(xfer, mas);

	mas->cur_xfer = xfer;
	if (xfer->tx_buf) {
		m_cmd |= SPI_TX_ONLY;
		mas->tx_rem_bytes = xfer->len;
		writel(len, se->base + SE_SPI_TX_TRANS_LEN);
	}

	if (xfer->rx_buf) {
		m_cmd |= SPI_RX_ONLY;
		writel(len, se->base + SE_SPI_RX_TRANS_LEN);
		mas->rx_rem_bytes = xfer->len;
	}

	/*
	 * Select DMA mode if an sgt is present, and only with a single
	 * entry. This is not a serious limitation because the xfer buffers
	 * are expected to fit into 1 entry almost always, and if any
	 * doesn't for any reason we fall back to FIFO mode anyway.
	 */
	if (!xfer->tx_sg.nents && !xfer->rx_sg.nents)
		mas->cur_xfer_mode = GENI_SE_FIFO;
	else if (xfer->tx_sg.nents > 1 || xfer->rx_sg.nents > 1) {
		dev_warn_once(mas->dev, "Doing FIFO, cannot handle tx_nents-%d, rx_nents-%d\n",
			      xfer->tx_sg.nents, xfer->rx_sg.nents);
		mas->cur_xfer_mode = GENI_SE_FIFO;
	} else
		mas->cur_xfer_mode = GENI_SE_DMA;
	geni_se_select_mode(se, mas->cur_xfer_mode);

	if (!xfer->cs_change) {
		if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
			m_params = FRAGMENTATION;
	}

	/*
	 * Lock around right before we start the transfer since our
	 * interrupt could come in at any time now.
	 */
	spin_lock_irq(&mas->lock);
	geni_se_setup_m_cmd(se, m_cmd, m_params);

	if (mas->cur_xfer_mode == GENI_SE_DMA) {
		if (m_cmd & SPI_RX_ONLY)
			geni_se_rx_init_dma(se, sg_dma_address(xfer->rx_sg.sgl),
					    sg_dma_len(xfer->rx_sg.sgl));
		if (m_cmd & SPI_TX_ONLY)
			geni_se_tx_init_dma(se, sg_dma_address(xfer->tx_sg.sgl),
					    sg_dma_len(xfer->tx_sg.sgl));
	} else if (m_cmd & SPI_TX_ONLY) {
		if (geni_spi_handle_tx(mas))
			writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
	}

	spin_unlock_irq(&mas->lock);
	return ret;
}

static int spi_geni_transfer_one(struct spi_controller *spi,
				 struct spi_device *slv,
				 struct spi_transfer *xfer)
{
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	int ret;

	if (spi_geni_is_abort_still_pending(mas))
		return -EBUSY;

	/* Terminate and return success for 0 byte length transfer */
	if (!xfer->len)
		return 0;

	if (mas->cur_xfer_mode == GENI_SE_FIFO || mas->cur_xfer_mode == GENI_SE_DMA) {
		ret = setup_se_xfer(xfer, mas, slv->mode, spi);
		/* SPI framework expects +ve ret code to wait for transfer complete */
		if (!ret)
			ret = 1;
		return ret;
	}
	return setup_gsi_xfer(xfer, mas, slv, spi);
}

static irqreturn_t geni_spi_isr(int irq, void *data)
{
	struct spi_controller *spi = data;
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	struct geni_se *se = &mas->se;
	u32 m_irq, dma_tx_status, dma_rx_status;

	m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
	dma_tx_status = readl_relaxed(se->base + SE_DMA_TX_IRQ_STAT);
	dma_rx_status = readl_relaxed(se->base + SE_DMA_RX_IRQ_STAT);

	if (!m_irq && !dma_tx_status && !dma_rx_status)
		return IRQ_NONE;

	if (m_irq & (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |
		     M_RX_FIFO_RD_ERR_EN | M_RX_FIFO_WR_ERR_EN |
		     M_TX_FIFO_RD_ERR_EN | M_TX_FIFO_WR_ERR_EN))
		dev_warn(mas->dev, "Unexpected IRQ err status %#010x\n", m_irq);

	spin_lock(&mas->lock);

	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
		if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
			geni_spi_handle_rx(mas);

		if (m_irq & M_TX_FIFO_WATERMARK_EN)
			geni_spi_handle_tx(mas);

		if (m_irq & M_CMD_DONE_EN) {
			if (mas->cur_xfer) {
				spi_finalize_current_transfer(spi);
				mas->cur_xfer = NULL;
				/*
				 * If this happens, then a CMD_DONE came before all the
				 * Tx buffer bytes were sent out. This is unusual, log
				 * this condition and disable the WM interrupt to
				 * prevent the system from stalling due to an interrupt
				 * storm.
				 *
				 * If this happens when all Rx bytes haven't been
				 * received, log the condition. The only known time
				 * this can happen is if bits_per_word != 8 and some
				 * registers that expect xfer lengths in num spi_words
				 * weren't written correctly.
				 */
				if (mas->tx_rem_bytes) {
					writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
					dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
						mas->tx_rem_bytes, mas->cur_bits_per_word);
				}
				if (mas->rx_rem_bytes)
					dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
						mas->rx_rem_bytes, mas->cur_bits_per_word);
			} else {
				complete(&mas->cs_done);
			}
		}
	} else if (mas->cur_xfer_mode == GENI_SE_DMA) {
		const struct spi_transfer *xfer = mas->cur_xfer;

		if (dma_tx_status)
			writel(dma_tx_status, se->base + SE_DMA_TX_IRQ_CLR);
		if (dma_rx_status)
			writel(dma_rx_status, se->base + SE_DMA_RX_IRQ_CLR);
		if (dma_tx_status & TX_DMA_DONE)
			mas->tx_rem_bytes = 0;
		if (dma_rx_status & RX_DMA_DONE)
			mas->rx_rem_bytes = 0;
		if (dma_tx_status & TX_RESET_DONE)
			complete(&mas->tx_reset_done);
		if (dma_rx_status & RX_RESET_DONE)
			complete(&mas->rx_reset_done);
		if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
			spi_finalize_current_transfer(spi);
			mas->cur_xfer = NULL;
		}
	}

	if (m_irq & M_CMD_CANCEL_EN)
		complete(&mas->cancel_done);
	if (m_irq & M_CMD_ABORT_EN)
		complete(&mas->abort_done);

	/*
	 * It's safe or a good idea to Ack all of our interrupts at the end
	 * of the function. Specifically:
	 * - M_CMD_DONE_EN / M_RX_FIFO_LAST_EN: Edge triggered interrupts and
	 *   clearing Acks. Clearing at the end relies on nobody else having
	 *   started a new transfer yet or else we could be clearing _their_
	 *   done bit, but everyone grabs the spinlock before starting a new
	 *   transfer.
	 * - M_RX_FIFO_WATERMARK_EN / M_TX_FIFO_WATERMARK_EN: These appear
	 *   to be "latched level" interrupts so it's important to clear them
	 *   _after_ you've handled the condition and always safe to do so
	 *   since they'll re-assert if they're still happening.
	 */
	writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);

	spin_unlock(&mas->lock);

	return IRQ_HANDLED;
}

static int spi_geni_target_abort(struct spi_controller *spi)
{
	if (!spi->cur_msg)
		return 0;

	handle_se_timeout(spi);
	spi_finalize_current_transfer(spi);

	return 0;
}

static int spi_geni_probe(struct platform_device *pdev)
{
	int ret, irq;
	struct spi_controller *spi;
	struct spi_geni_master *mas;
	void __iomem *base;
	struct clk *clk;
	struct device *dev = &pdev->dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		return dev_err_probe(dev, ret, "could not set DMA mask\n");

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = devm_clk_get(dev, "se");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (device_property_read_bool(dev, "spi-slave"))
		spi = devm_spi_alloc_target(dev, sizeof(*mas));
	else
		spi = devm_spi_alloc_host(dev, sizeof(*mas));

	if (!spi)
		return -ENOMEM;

	platform_set_drvdata(pdev, spi);
	mas = spi_controller_get_devdata(spi);
	mas->irq = irq;
	mas->dev = dev;
	mas->se.dev = dev;
	mas->se.wrapper = dev_get_drvdata(dev->parent);
	mas->se.base = base;
	mas->se.clk = clk;

	ret = devm_pm_opp_set_clkname(&pdev->dev, "se");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "invalid OPP table in device tree\n");
		return ret;
	}

	spi->bus_num = -1;
	spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	spi->num_chipselect = 4;
	spi->max_speed_hz = 50000000;
	spi->max_dma_len = 0xffff0; /* 24 bits for tx/rx dma length */
	spi->prepare_message = spi_geni_prepare_message;
	spi->transfer_one = spi_geni_transfer_one;
	spi->can_dma = geni_can_dma;
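	/* DMA mappings are done against the QUP wrapper (parent) device */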
	spi->dma_map_dev = dev->parent;
	spi->auto_runtime_pm = true;
	spi->handle_err = spi_geni_handle_err;
	spi->use_gpio_descriptors = true;

	init_completion(&mas->cs_done);
	init_completion(&mas->cancel_done);
	init_completion(&mas->abort_done);
	init_completion(&mas->tx_reset_done);
	init_completion(&mas->rx_reset_done);
	spin_lock_init(&mas->lock);

	if (spi->target)
		spi->target_abort = spi_geni_target_abort;

	ret = geni_icc_get(&mas->se, NULL);
	if (ret)
		return ret;

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	/* Set the bus quota to a reasonable value for register access */
	mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
	mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;

	ret = geni_icc_set_bw(&mas->se);
	if (ret)
		return ret;

	ret = spi_geni_init(mas);
	if (ret)
		return ret;

	/*
	 * TX is required per GSI spec, see setup_gsi_xfer().
	 */
	if (mas->cur_xfer_mode == GENI_GPI_DMA)
		spi->flags = SPI_CONTROLLER_MUST_TX;

	ret = devm_request_irq(dev, mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
	if (ret)
		return ret;

	return devm_spi_register_controller(dev, spi);
}

static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
	struct spi_controller *spi = dev_get_drvdata(dev);
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	int ret;

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);

	ret = geni_se_resources_off(&mas->se);
	if (ret)
		return ret;

	return geni_icc_disable(&mas->se);
}

static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
	struct spi_controller *spi = dev_get_drvdata(dev);
	struct spi_geni_master *mas = spi_controller_get_devdata(spi);
	int ret;

	ret = geni_icc_enable(&mas->se);
	if (ret)
		return ret;

	ret = geni_se_resources_on(&mas->se);
	if (ret)
		return ret;

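	/* Restore the source clock rate (and the OPP vote dropped on suspend) */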
	return dev_pm_opp_set_rate(mas->dev, mas->cur_sclk_hz);
}

static int __maybe_unused spi_geni_suspend(struct device *dev)
{
	struct spi_controller *spi = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(spi);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret)
		spi_controller_resume(spi);

	return ret;
}

static int __maybe_unused spi_geni_resume(struct device *dev)
{
	struct spi_controller *spi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = spi_controller_resume(spi);
	if (ret)
		pm_runtime_force_suspend(dev);

	return ret;
}

static const struct dev_pm_ops spi_geni_pm_ops = {
	SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
			   spi_geni_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

static const struct of_device_id spi_geni_dt_match[] = {
	{ .compatible = "qcom,geni-spi" },
	{}
};
MODULE_DEVICE_TABLE(of, spi_geni_dt_match);

static struct platform_driver spi_geni_driver = {
	.probe = spi_geni_probe,
	.driver = {
		.name = "geni_spi",
		.pm = &spi_geni_pm_ops,
		.of_match_table = spi_geni_dt_match,
	},
};
module_platform_driver(spi_geni_driver);

MODULE_DESCRIPTION("SPI driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");