/*-
 * Copyright (c) 2017-2018 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Cadence Quad SPI Flash Controller driver.
 * Only 4-byte (4B) addressing mode is supported.
 */

#include <sys/cdefs.h>
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <geom/geom_disk.h>

#include <machine/bus.h>

#include <dev/fdt/simplebus.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>

#include <dev/flash/cqspi.h>
#include <dev/flash/mx25lreg.h>
#include <dev/xdma/xdma.h>

#include "qspi_if.h"

#define CQSPI_DEBUG
#undef CQSPI_DEBUG

#ifdef CQSPI_DEBUG
#define dprintf(fmt, ...)       printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif

#define CQSPI_SECTORSIZE        512
#define TX_QUEUE_SIZE           16
#define RX_QUEUE_SIZE           16

#define READ4(_sc, _reg)        bus_read_4((_sc)->res[0], _reg)
#define READ2(_sc, _reg)        bus_read_2((_sc)->res[0], _reg)
#define READ1(_sc, _reg)        bus_read_1((_sc)->res[0], _reg)
#define WRITE4(_sc, _reg, _val) bus_write_4((_sc)->res[0], _reg, _val)
#define WRITE2(_sc, _reg, _val) bus_write_2((_sc)->res[0], _reg, _val)
#define WRITE1(_sc, _reg, _val) bus_write_1((_sc)->res[0], _reg, _val)
#define READ_DATA_4(_sc, _reg)  bus_read_4((_sc)->res[1], _reg)
#define READ_DATA_1(_sc, _reg)  bus_read_1((_sc)->res[1], _reg)
#define WRITE_DATA_4(_sc, _reg, _val)   bus_write_4((_sc)->res[1], _reg, _val)
#define WRITE_DATA_1(_sc, _reg, _val)   bus_write_1((_sc)->res[1], _reg, _val)

struct cqspi_softc {
        device_t                dev;

        struct resource         *res[3];
        bus_space_tag_t         bst;
        bus_space_handle_t      bsh;
        void                    *ih;
        uint8_t                 read_op_done;
        uint8_t                 write_op_done;

        uint32_t                fifo_depth;
        uint32_t                fifo_width;
        uint32_t                trigger_address;
        uint32_t                sram_phys;

        /* xDMA */
        xdma_controller_t       *xdma_tx;
        xdma_channel_t          *xchan_tx;
        void                    *ih_tx;

        xdma_controller_t       *xdma_rx;
        xdma_channel_t          *xchan_rx;
        void                    *ih_rx;

        struct intr_config_hook config_intrhook;
        struct mtx              sc_mtx;
};

#define CQSPI_LOCK(_sc)         mtx_lock(&(_sc)->sc_mtx)
#define CQSPI_UNLOCK(_sc)       mtx_unlock(&(_sc)->sc_mtx)
#define CQSPI_LOCK_INIT(_sc)                                    \
        mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),   \
            "cqspi", MTX_DEF)
#define CQSPI_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
#define CQSPI_ASSERT_LOCKED(_sc)                                \
        mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define CQSPI_ASSERT_UNLOCKED(_sc)                              \
        mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static struct resource_spec cqspi_spec[] = {
        { SYS_RES_MEMORY,       0,      RF_ACTIVE },
        { SYS_RES_MEMORY,       1,      RF_ACTIVE },
        { SYS_RES_IRQ,          0,      RF_ACTIVE },
        { -1, 0 }
};

static struct ofw_compat_data compat_data[] = {
        { "cdns,qspi-nor",      1 },
        { NULL,                 0 },
};

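/*
 * Controller interrupt handler.  Data transfers in this driver are carried
 * out by xDMA, so for now we only acknowledge (clear) whatever indirect
 * operation status bits are pending.
 */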
static void
cqspi_intr(void *arg)
{
        struct cqspi_softc *sc;
        uint32_t pending;

        sc = arg;

        pending = READ4(sc, CQSPI_IRQSTAT);

        dprintf("%s: IRQSTAT %x\n", __func__, pending);

        if (pending & (IRQMASK_INDOPDONE | IRQMASK_INDXFRLVL |
            IRQMASK_INDSRAMFULL)) {
                /* TODO: PIO operation done */
        }

        WRITE4(sc, CQSPI_IRQSTAT, pending);
}

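/*
 * xDMA transmit-completion callback: dequeue finished bios and wake up the
 * thread sleeping in cqspi_write().
 */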
static int
cqspi_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
{
        struct xdma_transfer_status st;
        struct cqspi_softc *sc;
        struct bio *bp;
        int ret;
        int deq;

        sc = arg;

        dprintf("%s\n", __func__);

        deq = 0;

        while (1) {
                ret = xdma_dequeue_bio(sc->xchan_tx, &bp, &st);
                if (ret != 0) {
                        break;
                }
                sc->write_op_done = 1;
                deq++;
        }

        if (deq > 1)
                device_printf(sc->dev,
                    "Warning: more than 1 tx bio dequeued\n");

        wakeup(&sc->xdma_tx);

        return (0);
}

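/*
 * xDMA receive-completion callback: dequeue finished bios and wake up the
 * thread sleeping in cqspi_read().
 */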
static int
cqspi_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
        struct xdma_transfer_status st;
        struct cqspi_softc *sc;
        struct bio *bp;
        int ret;
        int deq;

        sc = arg;

        dprintf("%s\n", __func__);

        deq = 0;

        while (1) {
                ret = xdma_dequeue_bio(sc->xchan_rx, &bp, &st);
                if (ret != 0) {
                        break;
                }
                sc->read_op_done = 1;
                deq++;
        }

        if (deq > 1)
                device_printf(sc->dev,
                    "Warning: more than 1 rx bio dequeued\n");

        wakeup(&sc->xdma_rx);

        return (0);
}

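/*
 * Poll the flash command register until the command execution status bit
 * clears, i.e. the previously triggered command has finished.
 */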
static int
cqspi_wait_for_completion(struct cqspi_softc *sc)
{
        int timeout;
        int i;

        timeout = 10000;

        for (i = timeout; i > 0; i--) {
                if ((READ4(sc, CQSPI_FLASHCMD) & FLASHCMD_CMDEXECSTAT) == 0) {
                        break;
                }
        }

        if (i == 0) {
                device_printf(sc->dev, "%s: cmd timed out: %x\n",
                    __func__, READ4(sc, CQSPI_FLASHCMD));
                return (-1);
        }

        return (0);
}

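/*
 * Issue a flash command that carries an address (e.g. a sector erase)
 * through the FLASHCMD register interface and wait for it to complete.
 */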
static int
cqspi_cmd_write_addr(struct cqspi_softc *sc, uint8_t cmd,
    uint32_t addr, uint32_t len)
{
        uint32_t reg;
        int ret;

        dprintf("%s: %x\n", __func__, cmd);

        WRITE4(sc, CQSPI_FLASHCMDADDR, addr);
        reg = (cmd << FLASHCMD_CMDOPCODE_S);
        reg |= (FLASHCMD_ENCMDADDR);
        reg |= ((len - 1) << FLASHCMD_NUMADDRBYTES_S);
        WRITE4(sc, CQSPI_FLASHCMD, reg);

        reg |= FLASHCMD_EXECCMD;
        WRITE4(sc, CQSPI_FLASHCMD, reg);

        ret = cqspi_wait_for_completion(sc);

        return (ret);
}

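/*
 * Issue a command-only flash operation (no address, no data phase).
 * The addr and len arguments are accepted for interface symmetry but are
 * not used here.
 */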
static int
cqspi_cmd_write(struct cqspi_softc *sc, uint8_t cmd,
    uint8_t *addr, uint32_t len)
{
        uint32_t reg;
        int ret;

        reg = (cmd << FLASHCMD_CMDOPCODE_S);
        WRITE4(sc, CQSPI_FLASHCMD, reg);
        reg |= FLASHCMD_EXECCMD;
        WRITE4(sc, CQSPI_FLASHCMD, reg);

        ret = cqspi_wait_for_completion(sc);

        return (ret);
}

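/*
 * Read up to 8 bytes of command response through the FLASHCMD interface.
 * Note that only the lower 32-bit read data register is consumed below, so
 * at most 4 bytes of valid data are returned to the caller.
 */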
static int
cqspi_cmd_read(struct cqspi_softc *sc, uint8_t cmd,
    uint8_t *addr, uint32_t len)
{
        uint32_t data;
        uint32_t reg;
        uint8_t *buf;
        int ret;
        int i;

        if (len > 8) {
                device_printf(sc->dev, "Failed to read data\n");
                return (-1);
        }

        dprintf("%s: %x\n", __func__, cmd);

        buf = (uint8_t *)addr;

        reg = (cmd << FLASHCMD_CMDOPCODE_S);
        reg |= ((len - 1) << FLASHCMD_NUMRDDATABYTES_S);
        reg |= FLASHCMD_ENRDDATA;
        WRITE4(sc, CQSPI_FLASHCMD, reg);

        reg |= FLASHCMD_EXECCMD;
        WRITE4(sc, CQSPI_FLASHCMD, reg);

        ret = cqspi_wait_for_completion(sc);
        if (ret != 0) {
                device_printf(sc->dev, "%s: cmd failed: %x\n",
                    __func__, cmd);
                return (ret);
        }

        data = READ4(sc, CQSPI_FLASHCMDRDDATALO);

        for (i = 0; i < len; i++)
                buf[i] = (data >> (i * 8)) & 0xff;

        return (0);
}

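/*
 * Block until the flash device clears the write-in-progress bit in its
 * status register.
 */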
static int
cqspi_wait_ready(struct cqspi_softc *sc)
{
        uint8_t data;

        do {
                cqspi_cmd_read(sc, CMD_READ_STATUS, &data, 1);
        } while (data & STATUS_WIP);

        return (0);
}

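/*
 * qspi_if method implementations: thin wrappers that forward register-level
 * reads and writes from the flash child device to the FLASHCMD helpers.
 */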
static int
cqspi_write_reg(device_t dev, device_t child,
    uint8_t opcode, uint8_t *addr, uint32_t len)
{
        struct cqspi_softc *sc;
        int ret;

        sc = device_get_softc(dev);

        ret = cqspi_cmd_write(sc, opcode, addr, len);

        return (ret);
}

static int
cqspi_read_reg(device_t dev, device_t child,
    uint8_t opcode, uint8_t *addr, uint32_t len)
{
        struct cqspi_softc *sc;
        int ret;

        sc = device_get_softc(dev);

        ret = cqspi_cmd_read(sc, opcode, addr, len);

        return (ret);
}

static int
cqspi_wait_idle(struct cqspi_softc *sc)
{
        uint32_t reg;

        do {
                reg = READ4(sc, CQSPI_CFG);
                if (reg & CFG_IDLE) {
                        break;
                }
        } while (1);

        return (0);
}

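/*
 * Erase the sector containing the given offset: enable writes on the flash
 * device, then issue a 4-byte-address sector erase command.
 */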
static int
cqspi_erase(device_t dev, device_t child, off_t offset)
{
        struct cqspi_softc *sc;

        sc = device_get_softc(dev);

        cqspi_wait_idle(sc);
        cqspi_wait_ready(sc);
        cqspi_cmd_write(sc, CMD_WRITE_ENABLE, 0, 0);

        cqspi_wait_idle(sc);
        cqspi_wait_ready(sc);
        cqspi_cmd_write_addr(sc, CMD_QUAD_SECTOR_ERASE, offset, 4);

        cqspi_wait_idle(sc);

        return (0);
}

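/*
 * Indirect write: program the indirect write engine with the target address
 * and byte count, select the quad page program opcode, enqueue the bio on
 * the TX xDMA channel, and sleep until the completion callback reports that
 * the data has been pushed into the controller SRAM.
 */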
static int
cqspi_write(device_t dev, device_t child, struct bio *bp,
    off_t offset, caddr_t data, off_t count)
{
        struct cqspi_softc *sc;
        uint32_t reg;

        dprintf("%s: offset 0x%llx count %lld bytes\n",
            __func__, offset, count);

        sc = device_get_softc(dev);

        cqspi_wait_ready(sc);
        cqspi_cmd_write(sc, CMD_WRITE_ENABLE, 0, 0);

        cqspi_wait_idle(sc);
        cqspi_wait_ready(sc);
        cqspi_wait_idle(sc);

        reg = DMAPER_NUMSGLREQBYTES_4;
        reg |= DMAPER_NUMBURSTREQBYTES_4;
        WRITE4(sc, CQSPI_DMAPER, reg);

        WRITE4(sc, CQSPI_INDWRWATER, 64);
        WRITE4(sc, CQSPI_INDWR, INDRD_IND_OPS_DONE_STATUS);
        WRITE4(sc, CQSPI_INDWR, 0);

        WRITE4(sc, CQSPI_INDWRCNT, count);
        WRITE4(sc, CQSPI_INDWRSTADDR, offset);

        reg = (0 << DEVWR_DUMMYWRCLKS_S);
        reg |= DEVWR_DATA_WIDTH_QUAD;
        reg |= DEVWR_ADDR_WIDTH_SINGLE;
        reg |= (CMD_QUAD_PAGE_PROGRAM << DEVWR_WROPCODE_S);
        WRITE4(sc, CQSPI_DEVWR, reg);

        reg = DEVRD_DATA_WIDTH_QUAD;
        reg |= DEVRD_ADDR_WIDTH_SINGLE;
        reg |= DEVRD_INST_WIDTH_SINGLE;
        WRITE4(sc, CQSPI_DEVRD, reg);

        xdma_enqueue_bio(sc->xchan_tx, &bp,
            sc->sram_phys, 4, 4, XDMA_MEM_TO_DEV);
        xdma_queue_submit(sc->xchan_tx);

        sc->write_op_done = 0;

        WRITE4(sc, CQSPI_INDWR, INDRD_START);

        while (sc->write_op_done == 0)
                tsleep(&sc->xdma_tx, PCATCH | PZERO, "spi", hz / 2);

        cqspi_wait_idle(sc);

        return (0);
}

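/*
 * Indirect read: program the indirect read engine and the quad output read
 * opcode, enqueue the bio on the RX xDMA channel, and sleep until the
 * completion callback signals that the data has been drained from the
 * controller SRAM.
 */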
static int
cqspi_read(device_t dev, device_t child, struct bio *bp,
    off_t offset, caddr_t data, off_t count)
{
        struct cqspi_softc *sc;
        uint32_t reg;

        sc = device_get_softc(dev);

        dprintf("%s: offset 0x%llx count %lld bytes\n",
            __func__, offset, count);

        cqspi_wait_idle(sc);

        reg = DMAPER_NUMSGLREQBYTES_4;
        reg |= DMAPER_NUMBURSTREQBYTES_4;
        WRITE4(sc, CQSPI_DMAPER, reg);

        WRITE4(sc, CQSPI_INDRDWATER, 64);
        WRITE4(sc, CQSPI_INDRD, INDRD_IND_OPS_DONE_STATUS);
        WRITE4(sc, CQSPI_INDRD, 0);

        WRITE4(sc, CQSPI_INDRDCNT, count);
        WRITE4(sc, CQSPI_INDRDSTADDR, offset);

        reg = (0 << DEVRD_DUMMYRDCLKS_S);
        reg |= DEVRD_DATA_WIDTH_QUAD;
        reg |= DEVRD_ADDR_WIDTH_SINGLE;
        reg |= DEVRD_INST_WIDTH_SINGLE;
        reg |= DEVRD_ENMODEBITS;
        reg |= (CMD_READ_4B_QUAD_OUTPUT << DEVRD_RDOPCODE_S);
        WRITE4(sc, CQSPI_DEVRD, reg);

        WRITE4(sc, CQSPI_MODEBIT, 0xff);
        WRITE4(sc, CQSPI_IRQMASK, 0);

        xdma_enqueue_bio(sc->xchan_rx, &bp, sc->sram_phys, 4, 4,
            XDMA_DEV_TO_MEM);
        xdma_queue_submit(sc->xchan_rx);

        sc->read_op_done = 0;

        WRITE4(sc, CQSPI_INDRD, INDRD_START);

        while (sc->read_op_done == 0)
                tsleep(&sc->xdma_rx, PCATCH | PZERO, "spi", hz / 2);

        cqspi_wait_idle(sc);

        return (0);
}

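/*
 * One-time controller setup: fetch the FIFO geometry and trigger address
 * from the device tree, then program the address size, SRAM partition,
 * baud rate divisor, DMA mode and device delays before re-enabling the
 * controller.
 */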
static int
cqspi_init(struct cqspi_softc *sc)
{
        pcell_t dts_value[1];
        phandle_t node;
        uint32_t reg;
        int len;

        device_printf(sc->dev, "Module ID %x\n",
            READ4(sc, CQSPI_MODULEID));

        if ((node = ofw_bus_get_node(sc->dev)) == -1) {
                return (ENXIO);
        }

        if ((len = OF_getproplen(node, "cdns,fifo-depth")) <= 0) {
                return (ENXIO);
        }
        OF_getencprop(node, "cdns,fifo-depth", dts_value, len);
        sc->fifo_depth = dts_value[0];

        if ((len = OF_getproplen(node, "cdns,fifo-width")) <= 0) {
                return (ENXIO);
        }
        OF_getencprop(node, "cdns,fifo-width", dts_value, len);
        sc->fifo_width = dts_value[0];

        if ((len = OF_getproplen(node, "cdns,trigger-address")) <= 0) {
                return (ENXIO);
        }
        OF_getencprop(node, "cdns,trigger-address", dts_value, len);
        sc->trigger_address = dts_value[0];

        /* Disable controller */
        reg = READ4(sc, CQSPI_CFG);
        reg &= ~(CFG_EN);
        WRITE4(sc, CQSPI_CFG, reg);

        /* 4-byte addressing: the field holds (number of address bytes - 1). */
        reg = READ4(sc, CQSPI_DEVSZ);
        reg &= ~(DEVSZ_NUMADDRBYTES_M);
        reg |= ((4 - 1) << DEVSZ_NUMADDRBYTES_S);
        WRITE4(sc, CQSPI_DEVSZ, reg);

        WRITE4(sc, CQSPI_SRAMPART, sc->fifo_depth / 2);

        /* TODO: calculate baud rate and delay values. */

        reg = READ4(sc, CQSPI_CFG);
        /* Configure baud rate */
        reg &= ~(CFG_BAUD_M);
        reg |= CFG_BAUD12;
        reg |= CFG_ENDMA;
        WRITE4(sc, CQSPI_CFG, reg);

        reg = (3 << DELAY_NSS_S);
        reg |= (3 << DELAY_BTWN_S);
        reg |= (1 << DELAY_AFTER_S);
        reg |= (1 << DELAY_INIT_S);
        WRITE4(sc, CQSPI_DELAY, reg);

        /* Configure the read data capture delay. */
        reg = READ4(sc, CQSPI_RDDATACAP);
        reg &= ~(RDDATACAP_DELAY_M);
        reg |= (1 << RDDATACAP_DELAY_S);
        WRITE4(sc, CQSPI_RDDATACAP, reg);

        /* Enable controller */
        reg = READ4(sc, CQSPI_CFG);
        reg |= (CFG_EN);
        WRITE4(sc, CQSPI_CFG, reg);

        return (0);
}

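/*
 * Walk the FDT children of the controller node and add each one as a child
 * device; the flash device driver then talks to us through qspi_if.
 */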
static int
cqspi_add_devices(device_t dev)
{
        phandle_t child, node;
        device_t child_dev;
        int error;

        node = ofw_bus_get_node(dev);

        for (child = OF_child(node); child != 0; child = OF_peer(child)) {
                child_dev =
                    simplebus_add_device(dev, child, 0, NULL, -1, NULL);
                if (child_dev == NULL) {
                        return (ENXIO);
                }

                error = device_probe_and_attach(child_dev);
                if (error != 0) {
                        printf("can't probe and attach: %d\n", error);
                }
        }

        return (0);
}

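/*
 * Deferred attach point, run once interrupts are available: enumerate and
 * attach the child devices, then remove the config intrhook.
 */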
static void
cqspi_delayed_attach(void *arg)
{
        struct cqspi_softc *sc;

        sc = arg;

        cqspi_add_devices(sc->dev);
        bus_generic_attach(sc->dev);

        config_intrhook_disestablish(&sc->config_intrhook);
}

static int
cqspi_probe(device_t dev)
{

        if (!ofw_bus_status_okay(dev)) {
                return (ENXIO);
        }

        if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) {
                return (ENXIO);
        }

        device_set_desc(dev, "Cadence Quad SPI controller");

        return (0);
}

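/*
 * Attach: map the controller registers and the SRAM/data window, hook up
 * the controller interrupt, acquire the TX and RX xDMA channels and their
 * completion handlers, initialize the hardware, and defer child device
 * enumeration to an interrupt config hook.
 */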
static int
cqspi_attach(device_t dev)
{
        struct cqspi_softc *sc;
        uint32_t caps;
        int error;

        sc = device_get_softc(dev);
        sc->dev = dev;

        if (bus_alloc_resources(dev, cqspi_spec, sc->res)) {
                device_printf(dev, "could not allocate resources\n");
                return (ENXIO);
        }

        /* Memory interface */
        sc->bst = rman_get_bustag(sc->res[0]);
        sc->bsh = rman_get_bushandle(sc->res[0]);

        sc->sram_phys = rman_get_start(sc->res[1]);

        /* Setup interrupt handlers */
        if (bus_setup_intr(sc->dev, sc->res[2], INTR_TYPE_BIO | INTR_MPSAFE,
            NULL, cqspi_intr, sc, &sc->ih)) {
                device_printf(sc->dev, "Unable to setup intr\n");
                return (ENXIO);
        }

        CQSPI_LOCK_INIT(sc);

        caps = 0;

        /* Get xDMA controller. */
        sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
        if (sc->xdma_tx == NULL) {
                device_printf(dev, "Can't find DMA controller.\n");
                return (ENXIO);
        }

        sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
        if (sc->xdma_rx == NULL) {
                device_printf(dev, "Can't find DMA controller.\n");
                return (ENXIO);
        }

        /* Alloc xDMA virtual channels. */
        sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, caps);
        if (sc->xchan_tx == NULL) {
                device_printf(dev, "Can't alloc virtual DMA channel.\n");
                return (ENXIO);
        }

        sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, caps);
        if (sc->xchan_rx == NULL) {
                device_printf(dev, "Can't alloc virtual DMA channel.\n");
                return (ENXIO);
        }

        /* Setup xDMA interrupt handlers. */
        error = xdma_setup_intr(sc->xchan_tx, 0, cqspi_xdma_tx_intr,
            sc, &sc->ih_tx);
        if (error) {
                device_printf(sc->dev,
                    "Can't setup xDMA interrupt handler.\n");
                return (ENXIO);
        }

        error = xdma_setup_intr(sc->xchan_rx, 0, cqspi_xdma_rx_intr,
            sc, &sc->ih_rx);
        if (error) {
                device_printf(sc->dev,
                    "Can't setup xDMA interrupt handler.\n");
                return (ENXIO);
        }

        xdma_prep_sg(sc->xchan_tx, TX_QUEUE_SIZE, maxphys, 8, 16, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
        xdma_prep_sg(sc->xchan_rx, RX_QUEUE_SIZE, maxphys, 8, 16, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);

        cqspi_init(sc);

        sc->config_intrhook.ich_func = cqspi_delayed_attach;
        sc->config_intrhook.ich_arg = sc;
        if (config_intrhook_establish(&sc->config_intrhook) != 0) {
                device_printf(dev, "config_intrhook_establish failed\n");
                return (ENOMEM);
        }

        return (0);
}

static int
cqspi_detach(device_t dev)
{

        return (ENXIO);
}

static device_method_t cqspi_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         cqspi_probe),
        DEVMETHOD(device_attach,        cqspi_attach),
        DEVMETHOD(device_detach,        cqspi_detach),

        /* Quad SPI Flash Interface */
        DEVMETHOD(qspi_read_reg,        cqspi_read_reg),
        DEVMETHOD(qspi_write_reg,       cqspi_write_reg),
        DEVMETHOD(qspi_read,            cqspi_read),
        DEVMETHOD(qspi_write,           cqspi_write),
        DEVMETHOD(qspi_erase,           cqspi_erase),

        { 0, 0 }
};

DEFINE_CLASS_1(cqspi, cqspi_driver, cqspi_methods,
    sizeof(struct cqspi_softc), simplebus_driver);

DRIVER_MODULE(cqspi, simplebus, cqspi_driver, 0, 0);