1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
5 * Copyright (c) 2013 Alexander Fedorov
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bus.h>
33 #include <sys/conf.h>
34 #include <sys/kernel.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/resource.h>
40 #include <sys/rman.h>
41 #include <sys/sysctl.h>
42 #include <sys/queue.h>
43 #include <sys/taskqueue.h>
44
45 #include <machine/bus.h>
46
47 #include <dev/ofw/ofw_bus.h>
48 #include <dev/ofw/ofw_bus_subr.h>
49
50 #include <dev/mmc/bridge.h>
51 #include <dev/mmc/mmcbrvar.h>
52 #include <dev/mmc/mmc_fdt_helpers.h>
53
54 #include <arm/allwinner/aw_mmc.h>
55 #include <dev/clk/clk.h>
56 #include <dev/hwreset/hwreset.h>
57 #include <dev/regulator/regulator.h>
58
59 #include "opt_mmccam.h"
60
61 #ifdef MMCCAM
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/mmc/mmc_sim.h>
68
69 #include "mmc_sim_if.h"
70 #endif
71
72 #include "mmc_pwrseq_if.h"
73
74 #define AW_MMC_MEMRES 0
75 #define AW_MMC_IRQRES 1
76 #define AW_MMC_RESSZ 2
77 #define AW_MMC_DMA_SEGS (PAGE_SIZE / sizeof(struct aw_mmc_dma_desc))
78 #define AW_MMC_DMA_DESC_SIZE (sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS)
79 #define AW_MMC_DMA_FTRGLEVEL 0x20070008
80
81 #define AW_MMC_RESET_RETRY 1000
82
83 #define CARD_ID_FREQUENCY 400000
84
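/*
 * Per-SoC configuration, selected through the compat_data table below:
 *  - dma_xferlen:    maximum number of bytes one DMA descriptor may carry
 *  - dma_desc_shift: right shift applied to the physical addresses written
 *                    into the descriptors and AW_MMC_DLBA (the D1 config
 *                    shifts by 2, i.e. word addresses)
 *  - mask_data0:     mask DATA0 while the card clock is being reprogrammed
 *  - can_calibrate:  controller has a software-controlled sample delay
 *                    (AW_MMC_SAMP_DL)
 *  - new_timing:     select the "new timing mode" through AW_MMC_NTSR
 *  - zero_is_skip:   a descriptor buf_size of 0 means "skip", so full-sized
 *                    segments must keep their explicit length
 */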
85 struct aw_mmc_conf {
86 uint32_t dma_xferlen;
87 uint32_t dma_desc_shift;
88 bool mask_data0;
89 bool can_calibrate;
90 bool new_timing;
91 bool zero_is_skip;
92 };
93
94 static const struct aw_mmc_conf a10_mmc_conf = {
95 .dma_xferlen = 0x2000,
96 .dma_desc_shift = 0,
97 };
98
99 static const struct aw_mmc_conf a13_mmc_conf = {
100 .dma_xferlen = 0x10000,
101 .dma_desc_shift = 0,
102 };
103
104 static const struct aw_mmc_conf a64_mmc_conf = {
105 .dma_xferlen = 0x10000,
106 .dma_desc_shift = 0,
107 .mask_data0 = true,
108 .can_calibrate = true,
109 .new_timing = true,
110 };
111
112 static const struct aw_mmc_conf a64_emmc_conf = {
113 .dma_xferlen = 0x2000,
114 .dma_desc_shift = 0,
115 .can_calibrate = true,
116 };
117
118 static const struct aw_mmc_conf d1_mmc_conf = {
119 .dma_xferlen = 0x1000,
120 .dma_desc_shift = 2,
121 .mask_data0 = true,
122 .can_calibrate = true,
123 .new_timing = true,
124 .zero_is_skip = true,
125 };
126
127 static struct ofw_compat_data compat_data[] = {
128 {"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
129 {"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
130 {"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
131 {"allwinner,sun20i-d1-mmc", (uintptr_t)&d1_mmc_conf},
132 {"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
133 {"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
134 {NULL, 0}
135 };
136
137 struct aw_mmc_softc {
138 device_t aw_dev;
139 clk_t aw_clk_ahb;
140 clk_t aw_clk_mmc;
141 hwreset_t aw_rst_ahb;
142 int aw_bus_busy;
143 int aw_resid;
144 int aw_timeout;
145 struct callout aw_timeoutc;
146 struct mmc_host aw_host;
147 struct mmc_helper mmc_helper;
148 #ifdef MMCCAM
149 union ccb * ccb;
150 struct mmc_sim mmc_sim;
151 #else
152 struct mmc_request * aw_req;
153 #endif
154 struct mtx aw_mtx;
155 struct resource * aw_res[AW_MMC_RESSZ];
156 struct aw_mmc_conf * aw_mmc_conf;
157 uint32_t aw_intr;
158 uint32_t aw_intr_wait;
159 void * aw_intrhand;
160 unsigned int aw_clock;
161 device_t child;
162
163 /* Fields required for DMA access. */
164 bus_addr_t aw_dma_desc_phys;
165 bus_dmamap_t aw_dma_map;
166 bus_dma_tag_t aw_dma_tag;
167 void * aw_dma_desc;
168 bus_dmamap_t aw_dma_buf_map;
169 bus_dma_tag_t aw_dma_buf_tag;
170 int aw_dma_map_err;
171 };
172
173 static struct resource_spec aw_mmc_res_spec[] = {
174 { SYS_RES_MEMORY, 0, RF_ACTIVE },
175 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
176 { -1, 0, 0 }
177 };
178
179 static int aw_mmc_probe(device_t);
180 static int aw_mmc_attach(device_t);
181 static int aw_mmc_detach(device_t);
182 static int aw_mmc_setup_dma(struct aw_mmc_softc *);
183 static void aw_mmc_teardown_dma(struct aw_mmc_softc *sc);
184 static int aw_mmc_reset(struct aw_mmc_softc *);
185 static int aw_mmc_init(struct aw_mmc_softc *);
186 static void aw_mmc_intr(void *);
187 static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);
188 static void aw_mmc_helper_cd_handler(device_t, bool);
189
190 static void aw_mmc_print_error(uint32_t);
191 static int aw_mmc_update_ios(device_t, device_t);
192 static int aw_mmc_request(device_t, device_t, struct mmc_request *);
193
194 #ifndef MMCCAM
195 static int aw_mmc_get_ro(device_t, device_t);
196 static int aw_mmc_acquire_host(device_t, device_t);
197 static int aw_mmc_release_host(device_t, device_t);
198 #endif
199
200 #define AW_MMC_LOCK(_sc) mtx_lock(&(_sc)->aw_mtx)
201 #define AW_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->aw_mtx)
202 #define AW_MMC_READ_4(_sc, _reg) \
203 bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
204 #define AW_MMC_WRITE_4(_sc, _reg, _value) \
205 bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)
206
207 SYSCTL_NODE(_hw, OID_AUTO, aw_mmc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
208 "aw_mmc driver");
209
210 static int aw_mmc_debug = 0;
211 SYSCTL_INT(_hw_aw_mmc, OID_AUTO, debug, CTLFLAG_RWTUN, &aw_mmc_debug, 0,
212     "Debug level: bit0=card changes, bit1=ios changes, bit2=interrupts, bit3=commands");
213 #define AW_MMC_DEBUG_CARD 0x1
214 #define AW_MMC_DEBUG_IOS 0x2
215 #define AW_MMC_DEBUG_INT 0x4
216 #define AW_MMC_DEBUG_CMD 0x8
217
218 #ifdef MMCCAM
219 static int
220 aw_mmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
221 {
222 struct aw_mmc_softc *sc;
223
224 sc = device_get_softc(dev);
225
226 cts->host_ocr = sc->aw_host.host_ocr;
227 cts->host_f_min = sc->aw_host.f_min;
228 cts->host_f_max = sc->aw_host.f_max;
229 cts->host_caps = sc->aw_host.caps;
230 cts->host_max_data = (sc->aw_mmc_conf->dma_xferlen *
231 AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
232 memcpy(&cts->ios, &sc->aw_host.ios, sizeof(struct mmc_ios));
233
234 return (0);
235 }
236
237 static int
238 aw_mmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
239 {
240 struct aw_mmc_softc *sc;
241 struct mmc_ios *ios;
242 struct mmc_ios *new_ios;
243
244 sc = device_get_softc(dev);
245 ios = &sc->aw_host.ios;
246 new_ios = &cts->ios;
247
248 /* Update only requested fields */
249 if (cts->ios_valid & MMC_CLK) {
250 ios->clock = new_ios->clock;
251 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
252 device_printf(sc->aw_dev, "Clock => %d\n", ios->clock);
253 }
254 if (cts->ios_valid & MMC_VDD) {
255 ios->vdd = new_ios->vdd;
256 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
257 device_printf(sc->aw_dev, "VDD => %d\n", ios->vdd);
258 }
259 if (cts->ios_valid & MMC_CS) {
260 ios->chip_select = new_ios->chip_select;
261 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
262 device_printf(sc->aw_dev, "CS => %d\n", ios->chip_select);
263 }
264 if (cts->ios_valid & MMC_BW) {
265 ios->bus_width = new_ios->bus_width;
266 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
267 device_printf(sc->aw_dev, "Bus width => %d\n", ios->bus_width);
268 }
269 if (cts->ios_valid & MMC_PM) {
270 ios->power_mode = new_ios->power_mode;
271 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
272 device_printf(sc->aw_dev, "Power mode => %d\n", ios->power_mode);
273 }
274 if (cts->ios_valid & MMC_BT) {
275 ios->timing = new_ios->timing;
276 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
277 device_printf(sc->aw_dev, "Timing => %d\n", ios->timing);
278 }
279 if (cts->ios_valid & MMC_BM) {
280 ios->bus_mode = new_ios->bus_mode;
281 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
282 device_printf(sc->aw_dev, "Bus mode => %d\n", ios->bus_mode);
283 }
284
285 return (aw_mmc_update_ios(sc->aw_dev, NULL));
286 }
287
288 static int
289 aw_mmc_cam_request(device_t dev, union ccb *ccb)
290 {
291 struct aw_mmc_softc *sc;
292 struct ccb_mmcio *mmcio;
293
294 sc = device_get_softc(dev);
295 mmcio = &ccb->mmcio;
296
297 AW_MMC_LOCK(sc);
298
299 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
300 device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
301 mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
302 mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
303 mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
304 }
305 if (mmcio->cmd.data != NULL) {
306 if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
307 panic("data->len = %d, data->flags = %d -- something is b0rked",
308 (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
309 }
310 	if (sc->ccb != NULL) {
311 		device_printf(sc->aw_dev, "Controller still has an active command\n");
		AW_MMC_UNLOCK(sc);
312 		return (EBUSY);
313 	}
314 sc->ccb = ccb;
315 /* aw_mmc_request locks again */
316 AW_MMC_UNLOCK(sc);
317 aw_mmc_request(sc->aw_dev, NULL, NULL);
318
319 return (0);
320 }
321
322 static void
323 aw_mmc_cam_poll(device_t dev)
324 {
325 struct aw_mmc_softc *sc;
326
327 sc = device_get_softc(dev);
328 aw_mmc_intr(sc);
329 }
330 #endif /* MMCCAM */
331
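/*
 * Card-detect callback registered with mmc_fdt_gpio_setup().  With MMCCAM the
 * SIM is simply asked to rescan; otherwise the mmc(4) child device is attached
 * or detached under the bus topology lock.
 */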
332 static void
333 aw_mmc_helper_cd_handler(device_t dev, bool present)
334 {
335 struct aw_mmc_softc *sc;
336
337 sc = device_get_softc(dev);
338 #ifdef MMCCAM
339 mmc_cam_sim_discover(&sc->mmc_sim);
340 #else
341 bus_topo_lock();
342 if (present) {
343 if (sc->child == NULL) {
344 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
345 device_printf(sc->aw_dev, "Card inserted\n");
346
347 sc->child = device_add_child(sc->aw_dev, "mmc", DEVICE_UNIT_ANY);
348 if (sc->child) {
349 device_set_ivars(sc->child, sc);
350 (void)device_probe_and_attach(sc->child);
351 }
352 }
353 } else {
354 /* Card isn't present, detach if necessary */
355 if (sc->child != NULL) {
356 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
357 device_printf(sc->aw_dev, "Card removed\n");
358
359 device_delete_child(sc->aw_dev, sc->child);
360 sc->child = NULL;
361 }
362 }
363 bus_topo_unlock();
364 #endif /* MMCCAM */
365 }
366
367 static int
368 aw_mmc_probe(device_t dev)
369 {
370
371 if (!ofw_bus_status_okay(dev))
372 return (ENXIO);
373 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
374 return (ENXIO);
375
376 device_set_desc(dev, "Allwinner Integrated MMC/SD controller");
377
378 return (BUS_PROBE_DEFAULT);
379 }
380
381 static int
382 aw_mmc_attach(device_t dev)
383 {
384 struct aw_mmc_softc *sc;
385 struct sysctl_ctx_list *ctx;
386 struct sysctl_oid_list *tree;
387 int error;
388
389 sc = device_get_softc(dev);
390 sc->aw_dev = dev;
391
392 sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
393
394 #ifndef MMCCAM
395 sc->aw_req = NULL;
396 #endif
397 if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
398 device_printf(dev, "cannot allocate device resources\n");
399 return (ENXIO);
400 }
401 if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
402 INTR_TYPE_NET | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
403 &sc->aw_intrhand)) {
404 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
405 device_printf(dev, "cannot setup interrupt handler\n");
406 return (ENXIO);
407 }
408 mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
409 MTX_DEF);
410 callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);
411
412 /* De-assert reset */
413 if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
414 error = hwreset_deassert(sc->aw_rst_ahb);
415 if (error != 0) {
416 device_printf(dev, "cannot de-assert reset\n");
417 goto fail;
418 }
419 }
420
421 /* Activate the module clock. */
422 error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
423 if (error != 0) {
424 device_printf(dev, "cannot get ahb clock\n");
425 goto fail;
426 }
427 error = clk_enable(sc->aw_clk_ahb);
428 if (error != 0) {
429 device_printf(dev, "cannot enable ahb clock\n");
430 goto fail;
431 }
432 error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
433 if (error != 0) {
434 device_printf(dev, "cannot get mmc clock\n");
435 goto fail;
436 }
437 error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
438 CLK_SET_ROUND_DOWN);
439 if (error != 0) {
440 device_printf(dev, "cannot init mmc clock\n");
441 goto fail;
442 }
443 error = clk_enable(sc->aw_clk_mmc);
444 if (error != 0) {
445 device_printf(dev, "cannot enable mmc clock\n");
446 goto fail;
447 }
448
449 sc->aw_timeout = 10;
450 ctx = device_get_sysctl_ctx(dev);
451 tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
452 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
453 &sc->aw_timeout, 0, "Request timeout in seconds");
454
455 	/* Soft-reset the controller. */
456 if (aw_mmc_reset(sc) != 0) {
457 device_printf(dev, "cannot reset the controller\n");
458 goto fail;
459 }
460
461 if (aw_mmc_setup_dma(sc) != 0) {
462 device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
463 goto fail;
464 }
465
466 /* Set some defaults for freq and supported mode */
467 sc->aw_host.f_min = 400000;
468 sc->aw_host.f_max = 52000000;
469 sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
470 sc->aw_host.caps |= MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
471 mmc_fdt_parse(dev, 0, &sc->mmc_helper, &sc->aw_host);
472 mmc_fdt_gpio_setup(dev, 0, &sc->mmc_helper, aw_mmc_helper_cd_handler);
473
474 #ifdef MMCCAM
475 sc->ccb = NULL;
476
477 if (mmc_cam_sim_alloc(dev, "aw_mmc", &sc->mmc_sim) != 0) {
478 device_printf(dev, "cannot alloc cam sim\n");
479 goto fail;
480 }
481 #endif /* MMCCAM */
482
483 return (0);
484
485 fail:
486 callout_drain(&sc->aw_timeoutc);
487 mtx_destroy(&sc->aw_mtx);
488 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
489 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
490
491 return (ENXIO);
492 }
493
494 static int
495 aw_mmc_detach(device_t dev)
496 {
497 struct aw_mmc_softc *sc;
498
499 sc = device_get_softc(dev);
500
501 clk_disable(sc->aw_clk_mmc);
502 clk_disable(sc->aw_clk_ahb);
503 hwreset_assert(sc->aw_rst_ahb);
504
505 mmc_fdt_gpio_teardown(&sc->mmc_helper);
506
507 callout_drain(&sc->aw_timeoutc);
508
509 device_delete_children(sc->aw_dev);
510
511 aw_mmc_teardown_dma(sc);
512
513 mtx_destroy(&sc->aw_mtx);
514
515 bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
516 bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
517
518 #ifdef MMCCAM
519 mmc_cam_sim_free(&sc->mmc_sim);
520 #endif
521
522 return (0);
523 }
524
525 static void
526 aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
527 {
528 struct aw_mmc_softc *sc;
529
530 sc = (struct aw_mmc_softc *)arg;
531 if (err) {
532 sc->aw_dma_map_err = err;
533 return;
534 }
535 sc->aw_dma_desc_phys = segs[0].ds_addr;
536 }
537
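/*
 * Allocate the DMA resources: one coherent, page-sized buffer holding the
 * descriptor list (AW_MMC_DMA_SEGS descriptors), plus a separate tag and map
 * used to load the data buffers, limited to dma_xferlen bytes per segment.
 * Both tags are restricted to 32-bit bus addresses.
 */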
538 static int
539 aw_mmc_setup_dma(struct aw_mmc_softc *sc)
540 {
541 int error;
542
543 /* Allocate the DMA descriptor memory. */
544 error = bus_dma_tag_create(
545 bus_get_dma_tag(sc->aw_dev), /* parent */
546 AW_MMC_DMA_ALIGN, 0, /* align, boundary */
547 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
548 BUS_SPACE_MAXADDR, /* highaddr */
549 NULL, NULL, /* filter, filterarg*/
550 AW_MMC_DMA_DESC_SIZE, 1, /* maxsize, nsegment */
551 AW_MMC_DMA_DESC_SIZE, /* maxsegsize */
552 0, /* flags */
553 NULL, NULL, /* lock, lockarg*/
554 &sc->aw_dma_tag);
555 if (error)
556 return (error);
557
558 error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
559 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
560 &sc->aw_dma_map);
561 if (error)
562 return (error);
563
564 error = bus_dmamap_load(sc->aw_dma_tag,
565 sc->aw_dma_map,
566 sc->aw_dma_desc, AW_MMC_DMA_DESC_SIZE,
567 aw_dma_desc_cb, sc, 0);
568 if (error)
569 return (error);
570 if (sc->aw_dma_map_err)
571 return (sc->aw_dma_map_err);
572
573 /* Create the DMA map for data transfers. */
574 error = bus_dma_tag_create(
575 bus_get_dma_tag(sc->aw_dev), /* parent */
576 AW_MMC_DMA_ALIGN, 0, /* align, boundary */
577 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
578 BUS_SPACE_MAXADDR, /* highaddr */
579 NULL, NULL, /* filter, filterarg*/
580 sc->aw_mmc_conf->dma_xferlen *
581 AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS, /* maxsize, nsegments */
582 sc->aw_mmc_conf->dma_xferlen, /* maxsegsize */
583 BUS_DMA_ALLOCNOW, /* flags */
584 NULL, NULL, /* lock, lockarg*/
585 &sc->aw_dma_buf_tag);
586 if (error)
587 return (error);
588 error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
589 &sc->aw_dma_buf_map);
590 if (error)
591 return (error);
592
593 return (0);
594 }
595
596 static void
597 aw_mmc_teardown_dma(struct aw_mmc_softc *sc)
598 {
599
600 bus_dmamap_unload(sc->aw_dma_tag, sc->aw_dma_map);
601 bus_dmamem_free(sc->aw_dma_tag, sc->aw_dma_desc, sc->aw_dma_map);
602 if (bus_dma_tag_destroy(sc->aw_dma_tag) != 0)
603 device_printf(sc->aw_dev, "Cannot destroy the dma tag\n");
604
605 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
606 bus_dmamap_destroy(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
607 if (bus_dma_tag_destroy(sc->aw_dma_buf_tag) != 0)
608 device_printf(sc->aw_dev, "Cannot destroy the dma buf tag\n");
609 }
610
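/*
 * bus_dmamap_load() callback: translate the bus_dma segment list into the
 * controller's descriptor format.  Each descriptor points at one segment; a
 * buf_size of 0 encodes the maximum transfer length unless zero_is_skip is
 * set.  Descriptors are chained through their physical addresses, the first
 * one is flagged FD and the last one LD|ER with "disable interrupt on
 * completion" cleared so only the final descriptor raises an interrupt.
 */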
611 static void
612 aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
613 {
614 int i;
615 struct aw_mmc_dma_desc *dma_desc;
616 struct aw_mmc_softc *sc;
617
618 sc = (struct aw_mmc_softc *)arg;
619 sc->aw_dma_map_err = err;
620
621 if (err)
622 return;
623
624 dma_desc = sc->aw_dma_desc;
625 for (i = 0; i < nsegs; i++) {
626 if ((segs[i].ds_len == sc->aw_mmc_conf->dma_xferlen) &&
627 !sc->aw_mmc_conf->zero_is_skip)
628 			dma_desc[i].buf_size = 0; /* A size of 0 indicates the maximum length. */
629 else
630 dma_desc[i].buf_size = segs[i].ds_len;
631 dma_desc[i].buf_addr = segs[i].ds_addr >>
632 sc->aw_mmc_conf->dma_desc_shift;
633 dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
634 AW_MMC_DMA_CONFIG_OWN | AW_MMC_DMA_CONFIG_DIC;
635 dma_desc[i].next = (sc->aw_dma_desc_phys +
636 (i + 1) * sizeof(struct aw_mmc_dma_desc)) >>
637 sc->aw_mmc_conf->dma_desc_shift;
638 }
639
640 dma_desc[0].config |= AW_MMC_DMA_CONFIG_FD;
641 dma_desc[nsegs - 1].config |= AW_MMC_DMA_CONFIG_LD |
642 AW_MMC_DMA_CONFIG_ER;
643 dma_desc[nsegs - 1].config &= ~AW_MMC_DMA_CONFIG_DIC;
644 dma_desc[nsegs - 1].next = 0;
645 }
646
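/*
 * Prepare a data transfer: load the request's buffer, sync both DMA maps,
 * enable and soft-reset the internal DMA controller (IDMAC), unmask the RX or
 * TX DMA completion interrupt, and program the descriptor list base address
 * and the FIFO watermark.
 */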
647 static int
648 aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
649 {
650 bus_dmasync_op_t sync_op;
651 int error;
652 struct mmc_command *cmd;
653 uint32_t val;
654
655 #ifdef MMCCAM
656 cmd = &sc->ccb->mmcio.cmd;
657 #else
658 cmd = sc->aw_req->cmd;
659 #endif
660 if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
661 return (EFBIG);
662 error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
663 cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
664 if (error)
665 return (error);
666 if (sc->aw_dma_map_err)
667 return (sc->aw_dma_map_err);
668
669 if (cmd->data->flags & MMC_DATA_WRITE)
670 sync_op = BUS_DMASYNC_PREWRITE;
671 else
672 sync_op = BUS_DMASYNC_PREREAD;
673 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
674 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);
675
676 /* Enable DMA */
677 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
678 val &= ~AW_MMC_GCTL_FIFO_AC_MOD;
679 val |= AW_MMC_GCTL_DMA_ENB;
680 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
681
682 /* Reset DMA */
683 val |= AW_MMC_GCTL_DMA_RST;
684 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);
685
686 AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
687 AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
688 AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);
689
690 /* Enable RX or TX DMA interrupt */
691 val = AW_MMC_READ_4(sc, AW_MMC_IDIE);
692 if (cmd->data->flags & MMC_DATA_WRITE)
693 val |= AW_MMC_IDST_TX_INT;
694 else
695 val |= AW_MMC_IDST_RX_INT;
696 AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);
697
698 	/* Set DMA descriptor list address */
699 AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys >>
700 sc->aw_mmc_conf->dma_desc_shift);
701
702 /* FIFO trigger level */
703 AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);
704
705 return (0);
706 }
707
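/*
 * Soft-reset the controller and wait, up to AW_MMC_RESET_RETRY * 100us
 * (~100ms), for the reset bit to self-clear.
 */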
708 static int
709 aw_mmc_reset(struct aw_mmc_softc *sc)
710 {
711 uint32_t reg;
712 int timeout;
713
714 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
715 reg |= AW_MMC_GCTL_RESET;
716 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
717 timeout = AW_MMC_RESET_RETRY;
718 while (--timeout > 0) {
719 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0)
720 break;
721 DELAY(100);
722 }
723 if (timeout == 0)
724 return (ETIMEDOUT);
725
726 return (0);
727 }
728
729 static int
730 aw_mmc_init(struct aw_mmc_softc *sc)
731 {
732 uint32_t reg;
733 int ret;
734
735 ret = aw_mmc_reset(sc);
736 if (ret != 0)
737 return (ret);
738
739 /* Set the timeout. */
740 AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
741 AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
742 AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));
743
744 	/* Mask all interrupts for now; aw_mmc_request() enables the ones it needs. */
745 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0);
746
747 /* Clear pending interrupts. */
748 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
749
750 /* Debug register, undocumented */
751 AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb);
752
753 /* Function select register */
754 AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000);
755
756 AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);
757
758 /* Enable interrupts and disable AHB access. */
759 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
760 reg |= AW_MMC_GCTL_INT_ENB;
761 reg &= ~AW_MMC_GCTL_FIFO_AC_MOD;
762 reg &= ~AW_MMC_GCTL_WAIT_MEM_ACCESS;
763 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
764
765 return (0);
766 }
767
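/*
 * Complete the current request.  On error the FIFO and DMA engines are reset
 * and the card clock is re-enabled before the request is handed back, either
 * to CAM via xpt_done() or to the mmc layer via req->done().
 */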
768 static void
769 aw_mmc_req_done(struct aw_mmc_softc *sc)
770 {
771 struct mmc_command *cmd;
772 #ifdef MMCCAM
773 union ccb *ccb;
774 #else
775 struct mmc_request *req;
776 #endif
777 uint32_t val, mask;
778 int retry;
779
780 #ifdef MMCCAM
781 ccb = sc->ccb;
782 cmd = &ccb->mmcio.cmd;
783 #else
784 cmd = sc->aw_req->cmd;
785 #endif
786 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
787 device_printf(sc->aw_dev, "%s: cmd %d err %d\n", __func__, cmd->opcode, cmd->error);
788 }
789 if (cmd->error != MMC_ERR_NONE) {
790 /* Reset the FIFO and DMA engines. */
791 mask = AW_MMC_GCTL_FIFO_RST | AW_MMC_GCTL_DMA_RST;
792 val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
793 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);
794
795 retry = AW_MMC_RESET_RETRY;
796 while (--retry > 0) {
797 if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) &
798 AW_MMC_GCTL_RESET) == 0)
799 break;
800 DELAY(100);
801 }
802 if (retry == 0)
803 device_printf(sc->aw_dev,
804 "timeout resetting DMA/FIFO\n");
805 aw_mmc_update_clock(sc, 1);
806 }
807
808 if (!dumping)
809 callout_stop(&sc->aw_timeoutc);
810 sc->aw_intr = 0;
811 sc->aw_resid = 0;
812 sc->aw_dma_map_err = 0;
813 sc->aw_intr_wait = 0;
814 #ifdef MMCCAM
815 sc->ccb = NULL;
816 ccb->ccb_h.status =
817 (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
818 xpt_done(ccb);
819 #else
820 req = sc->aw_req;
821 sc->aw_req = NULL;
822 req->done(req);
823 #endif
824 }
825
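/*
 * Successful completion path: wait for the card to deassert busy, latch the
 * response registers (a long response is read from RESP3..RESP0 into
 * resp[0..3]) and verify that the whole data transfer completed (aw_resid
 * counts 32-bit words).
 */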
826 static void
827 aw_mmc_req_ok(struct aw_mmc_softc *sc)
828 {
829 int timeout;
830 struct mmc_command *cmd;
831 uint32_t status;
832
833 timeout = 1000;
834 while (--timeout > 0) {
835 status = AW_MMC_READ_4(sc, AW_MMC_STAR);
836 if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
837 break;
838 DELAY(1000);
839 }
840 #ifdef MMCCAM
841 cmd = &sc->ccb->mmcio.cmd;
842 #else
843 cmd = sc->aw_req->cmd;
844 #endif
845 if (timeout == 0) {
846 cmd->error = MMC_ERR_FAILED;
847 aw_mmc_req_done(sc);
848 return;
849 }
850 if (cmd->flags & MMC_RSP_PRESENT) {
851 if (cmd->flags & MMC_RSP_136) {
852 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
853 cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
854 cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
855 cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
856 } else
857 cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
858 }
859 	/* Was all of the data transferred? */
860 if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
861 cmd->error = MMC_ERR_FAILED;
862 aw_mmc_req_done(sc);
863 }
864
865 static inline void
866 set_mmc_error(struct aw_mmc_softc *sc, int error_code)
867 {
868 #ifdef MMCCAM
869 sc->ccb->mmcio.cmd.error = error_code;
870 #else
871 sc->aw_req->cmd->error = error_code;
872 #endif
873 }
874
875 static void
876 aw_mmc_timeout(void *arg)
877 {
878 struct aw_mmc_softc *sc;
879
880 sc = (struct aw_mmc_softc *)arg;
881 #ifdef MMCCAM
882 if (sc->ccb != NULL) {
883 #else
884 if (sc->aw_req != NULL) {
885 #endif
886 device_printf(sc->aw_dev, "controller timeout\n");
887 set_mmc_error(sc, MMC_ERR_TIMEOUT);
888 aw_mmc_req_done(sc);
889 } else
890 device_printf(sc->aw_dev,
891 "Spurious timeout - no active request\n");
892 }
893
894 static void
895 aw_mmc_print_error(uint32_t err)
896 {
897 if(err & AW_MMC_INT_RESP_ERR)
898 printf("AW_MMC_INT_RESP_ERR ");
899 if (err & AW_MMC_INT_RESP_CRC_ERR)
900 printf("AW_MMC_INT_RESP_CRC_ERR ");
901 if (err & AW_MMC_INT_DATA_CRC_ERR)
902 printf("AW_MMC_INT_DATA_CRC_ERR ");
903 if (err & AW_MMC_INT_RESP_TIMEOUT)
904 printf("AW_MMC_INT_RESP_TIMEOUT ");
905 if (err & AW_MMC_INT_FIFO_RUN_ERR)
906 printf("AW_MMC_INT_FIFO_RUN_ERR ");
907 if (err & AW_MMC_INT_CMD_BUSY)
908 printf("AW_MMC_INT_CMD_BUSY ");
909 if (err & AW_MMC_INT_DATA_START_ERR)
910 printf("AW_MMC_INT_DATA_START_ERR ");
911 if (err & AW_MMC_INT_DATA_END_BIT_ERR)
912 printf("AW_MMC_INT_DATA_END_BIT_ERR");
913 printf("\n");
914 }
915
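/*
 * Interrupt handler, also called directly for MMCCAM polling.  Reads the raw
 * interrupt status (RISR), the internal DMA status (IDST) and the interrupt
 * mask; error bits fail the request, a DMA completion syncs and unloads the
 * data buffer, and the request finishes once every bit in aw_intr_wait has
 * been observed.
 */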
916 static void
917 aw_mmc_intr(void *arg)
918 {
919 bus_dmasync_op_t sync_op;
920 struct aw_mmc_softc *sc;
921 struct mmc_data *data;
922 uint32_t idst, imask, rint;
923
924 sc = (struct aw_mmc_softc *)arg;
925 AW_MMC_LOCK(sc);
926 rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
927 idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
928 imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
929 if (idst == 0 && imask == 0 && rint == 0) {
930 AW_MMC_UNLOCK(sc);
931 return;
932 }
933 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) {
934 device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
935 idst, imask, rint);
936 }
937 #ifdef MMCCAM
938 if (sc->ccb == NULL) {
939 #else
940 if (sc->aw_req == NULL) {
941 #endif
942 device_printf(sc->aw_dev,
943 "Spurious interrupt - no active request, rint: 0x%08X\n",
944 rint);
945 aw_mmc_print_error(rint);
946 goto end;
947 }
948 if (rint & AW_MMC_INT_ERR_BIT) {
949 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) {
950 device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
951 aw_mmc_print_error(rint);
952 }
953 if (rint & AW_MMC_INT_RESP_TIMEOUT)
954 set_mmc_error(sc, MMC_ERR_TIMEOUT);
955 else
956 set_mmc_error(sc, MMC_ERR_FAILED);
957 aw_mmc_req_done(sc);
958 goto end;
959 }
960 if (idst & AW_MMC_IDST_ERROR) {
961 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT))
962 device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
963 set_mmc_error(sc, MMC_ERR_FAILED);
964 aw_mmc_req_done(sc);
965 goto end;
966 }
967
968 sc->aw_intr |= rint;
969 #ifdef MMCCAM
970 data = sc->ccb->mmcio.cmd.data;
971 #else
972 data = sc->aw_req->cmd->data;
973 #endif
974 if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
975 if (data->flags & MMC_DATA_WRITE)
976 sync_op = BUS_DMASYNC_POSTWRITE;
977 else
978 sync_op = BUS_DMASYNC_POSTREAD;
979 bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
980 sync_op);
981 bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
982 BUS_DMASYNC_POSTWRITE);
983 bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
984 sc->aw_resid = data->len >> 2;
985 }
986 if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
987 aw_mmc_req_ok(sc);
988
989 end:
990 AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
991 AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
992 AW_MMC_UNLOCK(sc);
993 }
994
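/*
 * Start a command.  The command register is assembled from the request flags,
 * block size and byte count are programmed for data transfers (with the
 * controller's auto-stop engine enabled for multi-block I/O), DMA is prepared
 * when data is present, and a watchdog callout of aw_timeout seconds is armed
 * unless the system is dumping.
 */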
995 static int
996 aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
997 {
998 int blksz;
999 struct aw_mmc_softc *sc;
1000 struct mmc_command *cmd;
1001 uint32_t cmdreg, imask;
1002 int err;
1003
1004 sc = device_get_softc(bus);
1005
1006 AW_MMC_LOCK(sc);
1007 #ifdef MMCCAM
1008 KASSERT(req == NULL, ("req should be NULL in MMCCAM case!"));
1009 /*
1010 * For MMCCAM, sc->ccb has been NULL-checked and populated
1011 * by aw_mmc_cam_request() already.
1012 */
1013 cmd = &sc->ccb->mmcio.cmd;
1014 #else
1015 if (sc->aw_req) {
1016 AW_MMC_UNLOCK(sc);
1017 return (EBUSY);
1018 }
1019 sc->aw_req = req;
1020 cmd = req->cmd;
1021
1022 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
1023 device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1024 cmd->opcode, cmd->arg, cmd->flags,
1025 cmd->data != NULL ? (unsigned int)cmd->data->len : 0,
1026 cmd->data != NULL ? cmd->data->flags: 0);
1027 }
1028 #endif
1029 cmdreg = AW_MMC_CMDR_LOAD;
1030 imask = AW_MMC_INT_ERR_BIT;
1031 sc->aw_intr_wait = 0;
1032 sc->aw_intr = 0;
1033 sc->aw_resid = 0;
1034 cmd->error = MMC_ERR_NONE;
1035
1036 if (cmd->opcode == MMC_GO_IDLE_STATE)
1037 cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;
1038
1039 if (cmd->flags & MMC_RSP_PRESENT)
1040 cmdreg |= AW_MMC_CMDR_RESP_RCV;
1041 if (cmd->flags & MMC_RSP_136)
1042 cmdreg |= AW_MMC_CMDR_LONG_RESP;
1043 if (cmd->flags & MMC_RSP_CRC)
1044 cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;
1045
1046 if (cmd->data) {
1047 cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;
1048
1049 if (cmd->data->flags & MMC_DATA_MULTI) {
1050 cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
1051 imask |= AW_MMC_INT_AUTO_STOP_DONE;
1052 sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
1053 } else {
1054 sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
1055 imask |= AW_MMC_INT_DATA_OVER;
1056 }
1057 if (cmd->data->flags & MMC_DATA_WRITE)
1058 cmdreg |= AW_MMC_CMDR_DIR_WRITE;
1059 #ifdef MMCCAM
1060 if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
1061 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, cmd->data->block_size);
1062 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
1063 } else
1064 #endif
1065 {
1066 blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
1067 AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
1068 AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
1069 }
1070 } else {
1071 imask |= AW_MMC_INT_CMD_DONE;
1072 }
1073
1074 /* Enable the interrupts we are interested in */
1075 AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask);
1076 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
1077
1078 /* Enable auto stop if needed */
1079 AW_MMC_WRITE_4(sc, AW_MMC_A12A,
1080 cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff);
1081
1082 /* Write the command argument */
1083 AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);
1084
1085 	/*
1086 	 * If there is no data, start the request now; otherwise prepare the
1087 	 * DMA transfer first and then start the request.
1088 	 */
1089 if (cmd->data == NULL) {
1090 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
1091 } else {
1092 err = aw_mmc_prepare_dma(sc);
1093 if (err != 0)
1094 device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);
1095
1096 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
1097 }
1098
1099 if (!dumping) {
1100 callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
1101 aw_mmc_timeout, sc);
1102 }
1103 AW_MMC_UNLOCK(sc);
1104
1105 return (0);
1106 }
1107
1108 static int
1109 aw_mmc_read_ivar(device_t bus, device_t child, int which,
1110 uintptr_t *result)
1111 {
1112 struct aw_mmc_softc *sc;
1113
1114 sc = device_get_softc(bus);
1115 switch (which) {
1116 default:
1117 return (EINVAL);
1118 case MMCBR_IVAR_BUS_MODE:
1119 *(int *)result = sc->aw_host.ios.bus_mode;
1120 break;
1121 case MMCBR_IVAR_BUS_WIDTH:
1122 *(int *)result = sc->aw_host.ios.bus_width;
1123 break;
1124 case MMCBR_IVAR_CHIP_SELECT:
1125 *(int *)result = sc->aw_host.ios.chip_select;
1126 break;
1127 case MMCBR_IVAR_CLOCK:
1128 *(int *)result = sc->aw_host.ios.clock;
1129 break;
1130 case MMCBR_IVAR_F_MIN:
1131 *(int *)result = sc->aw_host.f_min;
1132 break;
1133 case MMCBR_IVAR_F_MAX:
1134 *(int *)result = sc->aw_host.f_max;
1135 break;
1136 case MMCBR_IVAR_HOST_OCR:
1137 *(int *)result = sc->aw_host.host_ocr;
1138 break;
1139 case MMCBR_IVAR_MODE:
1140 *(int *)result = sc->aw_host.mode;
1141 break;
1142 case MMCBR_IVAR_OCR:
1143 *(int *)result = sc->aw_host.ocr;
1144 break;
1145 case MMCBR_IVAR_POWER_MODE:
1146 *(int *)result = sc->aw_host.ios.power_mode;
1147 break;
1148 case MMCBR_IVAR_VDD:
1149 *(int *)result = sc->aw_host.ios.vdd;
1150 break;
1151 case MMCBR_IVAR_VCCQ:
1152 *(int *)result = sc->aw_host.ios.vccq;
1153 break;
1154 case MMCBR_IVAR_CAPS:
1155 *(int *)result = sc->aw_host.caps;
1156 break;
1157 case MMCBR_IVAR_TIMING:
1158 *(int *)result = sc->aw_host.ios.timing;
1159 break;
1160 case MMCBR_IVAR_MAX_DATA:
1161 *(int *)result = (sc->aw_mmc_conf->dma_xferlen *
1162 AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
1163 break;
1164 case MMCBR_IVAR_RETUNE_REQ:
1165 *(int *)result = retune_req_none;
1166 break;
1167 }
1168
1169 return (0);
1170 }
1171
1172 static int
1173 aw_mmc_write_ivar(device_t bus, device_t child, int which,
1174 uintptr_t value)
1175 {
1176 struct aw_mmc_softc *sc;
1177
1178 sc = device_get_softc(bus);
1179 switch (which) {
1180 default:
1181 return (EINVAL);
1182 case MMCBR_IVAR_BUS_MODE:
1183 sc->aw_host.ios.bus_mode = value;
1184 break;
1185 case MMCBR_IVAR_BUS_WIDTH:
1186 sc->aw_host.ios.bus_width = value;
1187 break;
1188 case MMCBR_IVAR_CHIP_SELECT:
1189 sc->aw_host.ios.chip_select = value;
1190 break;
1191 case MMCBR_IVAR_CLOCK:
1192 sc->aw_host.ios.clock = value;
1193 break;
1194 case MMCBR_IVAR_MODE:
1195 sc->aw_host.mode = value;
1196 break;
1197 case MMCBR_IVAR_OCR:
1198 sc->aw_host.ocr = value;
1199 break;
1200 case MMCBR_IVAR_POWER_MODE:
1201 sc->aw_host.ios.power_mode = value;
1202 break;
1203 case MMCBR_IVAR_VDD:
1204 sc->aw_host.ios.vdd = value;
1205 break;
1206 case MMCBR_IVAR_VCCQ:
1207 sc->aw_host.ios.vccq = value;
1208 break;
1209 case MMCBR_IVAR_TIMING:
1210 sc->aw_host.ios.timing = value;
1211 break;
1212 /* These are read-only */
1213 case MMCBR_IVAR_CAPS:
1214 case MMCBR_IVAR_HOST_OCR:
1215 case MMCBR_IVAR_F_MIN:
1216 case MMCBR_IVAR_F_MAX:
1217 case MMCBR_IVAR_MAX_DATA:
1218 return (EINVAL);
1219 }
1220
1221 return (0);
1222 }
1223
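/*
 * Gate or ungate the card clock.  After writing AW_MMC_CKCR the change must be
 * latched by issuing a "program clock" command (AW_MMC_CMDR_PRG_CLK); the LOAD
 * bit is polled until the controller has accepted it.  Controllers with
 * mask_data0 keep DATA0 masked for the duration of the update.
 */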
1224 static int
1225 aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
1226 {
1227 uint32_t reg;
1228 int retry;
1229
1230 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1231 reg &= ~(AW_MMC_CKCR_ENB | AW_MMC_CKCR_LOW_POWER |
1232 AW_MMC_CKCR_MASK_DATA0);
1233
1234 if (clkon)
1235 reg |= AW_MMC_CKCR_ENB;
1236 if (sc->aw_mmc_conf->mask_data0)
1237 reg |= AW_MMC_CKCR_MASK_DATA0;
1238
1239 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1240
1241 reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
1242 AW_MMC_CMDR_WAIT_PRE_OVER;
1243 AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
1244 retry = 0xfffff;
1245
1246 while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
1247 reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
1248 DELAY(10);
1249 }
1250 AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);
1251
1252 if (reg & AW_MMC_CMDR_LOAD) {
1253 device_printf(sc->aw_dev, "timeout updating clock\n");
1254 return (ETIMEDOUT);
1255 }
1256
1257 if (sc->aw_mmc_conf->mask_data0) {
1258 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1259 reg &= ~AW_MMC_CKCR_MASK_DATA0;
1260 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1261 }
1262
1263 return (0);
1264 }
1265
1266 #ifndef MMCCAM
1267 static int
1268 aw_mmc_switch_vccq(device_t bus, device_t child)
1269 {
1270 struct aw_mmc_softc *sc;
1271 int uvolt, err;
1272
1273 sc = device_get_softc(bus);
1274
1275 if (sc->mmc_helper.vqmmc_supply == NULL)
1276 return EOPNOTSUPP;
1277
1278 switch (sc->aw_host.ios.vccq) {
1279 case vccq_180:
1280 uvolt = 1800000;
1281 break;
1282 case vccq_330:
1283 uvolt = 3300000;
1284 break;
1285 default:
1286 return EINVAL;
1287 }
1288
1289 err = regulator_set_voltage(sc->mmc_helper.vqmmc_supply, uvolt, uvolt);
1290 if (err != 0) {
1291 device_printf(sc->aw_dev,
1292 "Cannot set vqmmc to %d<->%d\n",
1293 uvolt,
1294 uvolt);
1295 return (err);
1296 }
1297
1298 return (0);
1299 }
1300 #endif
1301
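/*
 * Apply the host ios settings: bus width, power state (through the vmmc/vqmmc
 * regulators and an optional mmc_pwrseq), DDR mode and the card clock.  For
 * DDR52 on controllers with new timing (or with an 8-bit bus) the internal
 * divider is set to 2 and the module clock is doubled to compensate.
 */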
1302 static int
1303 aw_mmc_update_ios(device_t bus, device_t child)
1304 {
1305 int error;
1306 struct aw_mmc_softc *sc;
1307 struct mmc_ios *ios;
1308 unsigned int clock;
1309 uint32_t reg, div = 1;
1310 int reg_status;
1311 int rv;
1312
1313 sc = device_get_softc(bus);
1314
1315 ios = &sc->aw_host.ios;
1316
1317 /* Set the bus width. */
1318 switch (ios->bus_width) {
1319 case bus_width_1:
1320 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
1321 break;
1322 case bus_width_4:
1323 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
1324 break;
1325 case bus_width_8:
1326 AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
1327 break;
1328 }
1329
1330 switch (ios->power_mode) {
1331 case power_on:
1332 break;
1333 case power_off:
1334 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
1335 device_printf(sc->aw_dev, "Powering down sd/mmc\n");
1336
1337 if (sc->mmc_helper.vmmc_supply) {
1338 			rv = regulator_status(sc->mmc_helper.vmmc_supply, &reg_status);
1339 if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED)
1340 regulator_disable(sc->mmc_helper.vmmc_supply);
1341 }
1342 if (sc->mmc_helper.vqmmc_supply) {
1343 			rv = regulator_status(sc->mmc_helper.vqmmc_supply, &reg_status);
1344 if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED)
1345 regulator_disable(sc->mmc_helper.vqmmc_supply);
1346 }
1347
1348 if (sc->mmc_helper.mmc_pwrseq)
1349 MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, false);
1350
1351 aw_mmc_reset(sc);
1352 break;
1353 case power_up:
1354 if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
1355 device_printf(sc->aw_dev, "Powering up sd/mmc\n");
1356
1357 if (sc->mmc_helper.vmmc_supply) {
1358 			rv = regulator_status(sc->mmc_helper.vmmc_supply, &reg_status);
1359 if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED)
1360 regulator_enable(sc->mmc_helper.vmmc_supply);
1361 }
1362 if (sc->mmc_helper.vqmmc_supply) {
1363 			rv = regulator_status(sc->mmc_helper.vqmmc_supply, &reg_status);
1364 if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED)
1365 regulator_enable(sc->mmc_helper.vqmmc_supply);
1366 }
1367
1368 if (sc->mmc_helper.mmc_pwrseq)
1369 MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, true);
1370 aw_mmc_init(sc);
1371 break;
1372 };
1373
1374 /* Enable ddr mode if needed */
1375 reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
1376 if (ios->timing == bus_timing_uhs_ddr50 ||
1377 ios->timing == bus_timing_mmc_ddr52)
1378 reg |= AW_MMC_GCTL_DDR_MOD_SEL;
1379 else
1380 reg &= ~AW_MMC_GCTL_DDR_MOD_SEL;
1381 AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
1382
1383 if (ios->clock && ios->clock != sc->aw_clock) {
1384 sc->aw_clock = clock = ios->clock;
1385
1386 /* Disable clock */
1387 error = aw_mmc_update_clock(sc, 0);
1388 if (error != 0)
1389 return (error);
1390
1391 if (ios->timing == bus_timing_mmc_ddr52 &&
1392 (sc->aw_mmc_conf->new_timing ||
1393 ios->bus_width == bus_width_8)) {
1394 div = 2;
1395 clock <<= 1;
1396 }
1397
1398 		/* Program the internal clock divider. */
1399 reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
1400 reg &= ~AW_MMC_CKCR_DIV;
1401 reg |= div - 1;
1402 AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
1403
1404 /* New timing mode if needed */
1405 if (sc->aw_mmc_conf->new_timing) {
1406 reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
1407 reg |= AW_MMC_NTSR_MODE_SELECT;
1408 AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
1409 }
1410
1411 /* Set the MMC clock. */
1412 error = clk_disable(sc->aw_clk_mmc);
1413 if (error != 0 && bootverbose)
1414 device_printf(sc->aw_dev,
1415 "failed to disable mmc clock: %d\n", error);
1416 error = clk_set_freq(sc->aw_clk_mmc, clock,
1417 CLK_SET_ROUND_DOWN);
1418 if (error != 0) {
1419 device_printf(sc->aw_dev,
1420 "failed to set frequency to %u Hz: %d\n",
1421 clock, error);
1422 return (error);
1423 }
1424 error = clk_enable(sc->aw_clk_mmc);
1425 if (error != 0 && bootverbose)
1426 device_printf(sc->aw_dev,
1427 "failed to re-enable mmc clock: %d\n", error);
1428
1429 if (sc->aw_mmc_conf->can_calibrate)
1430 AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);
1431
1432 /* Enable clock. */
1433 error = aw_mmc_update_clock(sc, 1);
1434 if (error != 0)
1435 return (error);
1436 }
1437
1438 return (0);
1439 }
1440
1441 #ifndef MMCCAM
1442 static int
1443 aw_mmc_get_ro(device_t bus, device_t child)
1444 {
1445 struct aw_mmc_softc *sc;
1446
1447 sc = device_get_softc(bus);
1448
1449 return (mmc_fdt_gpio_get_readonly(&sc->mmc_helper));
1450 }
1451
1452 static int
1453 aw_mmc_acquire_host(device_t bus, device_t child)
1454 {
1455 struct aw_mmc_softc *sc;
1456 int error;
1457
1458 sc = device_get_softc(bus);
1459 AW_MMC_LOCK(sc);
1460 while (sc->aw_bus_busy) {
1461 error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
1462 if (error != 0) {
1463 AW_MMC_UNLOCK(sc);
1464 return (error);
1465 }
1466 }
1467 sc->aw_bus_busy++;
1468 AW_MMC_UNLOCK(sc);
1469
1470 return (0);
1471 }
1472
1473 static int
1474 aw_mmc_release_host(device_t bus, device_t child)
1475 {
1476 struct aw_mmc_softc *sc;
1477
1478 sc = device_get_softc(bus);
1479 AW_MMC_LOCK(sc);
1480 sc->aw_bus_busy--;
1481 wakeup(sc);
1482 AW_MMC_UNLOCK(sc);
1483
1484 return (0);
1485 }
1486 #endif
1487
1488 static device_method_t aw_mmc_methods[] = {
1489 /* Device interface */
1490 DEVMETHOD(device_probe, aw_mmc_probe),
1491 DEVMETHOD(device_attach, aw_mmc_attach),
1492 DEVMETHOD(device_detach, aw_mmc_detach),
1493
1494 /* Bus interface */
1495 DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar),
1496 DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar),
1497 DEVMETHOD(bus_add_child, bus_generic_add_child),
1498
1499 #ifndef MMCCAM
1500 /* MMC bridge interface */
1501 DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios),
1502 DEVMETHOD(mmcbr_request, aw_mmc_request),
1503 DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro),
1504 DEVMETHOD(mmcbr_switch_vccq, aw_mmc_switch_vccq),
1505 DEVMETHOD(mmcbr_acquire_host, aw_mmc_acquire_host),
1506 DEVMETHOD(mmcbr_release_host, aw_mmc_release_host),
1507 #endif
1508
1509 #ifdef MMCCAM
1510 /* MMCCAM interface */
1511 DEVMETHOD(mmc_sim_get_tran_settings, aw_mmc_get_tran_settings),
1512 DEVMETHOD(mmc_sim_set_tran_settings, aw_mmc_set_tran_settings),
1513 DEVMETHOD(mmc_sim_cam_request, aw_mmc_cam_request),
1514 DEVMETHOD(mmc_sim_cam_poll, aw_mmc_cam_poll),
1515 #endif
1516
1517 DEVMETHOD_END
1518 };
1519
1520 static driver_t aw_mmc_driver = {
1521 "aw_mmc",
1522 aw_mmc_methods,
1523 sizeof(struct aw_mmc_softc),
1524 };
1525
1526 DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, NULL, NULL);
1527 #ifndef MMCCAM
1528 MMC_DECLARE_BRIDGE(aw_mmc);
1529 #endif
1530 SIMPLEBUS_PNP_INFO(compat_data);
1531